summaryrefslogtreecommitdiff
path: root/analysis/learning.R
diff options
context:
space:
mode:
Diffstat (limited to 'analysis/learning.R')
-rw-r--r--analysis/learning.R103
1 files changed, 92 insertions, 11 deletions
diff --git a/analysis/learning.R b/analysis/learning.R
index d21b8cd..ff31d33 100644
--- a/analysis/learning.R
+++ b/analysis/learning.R
@@ -1,8 +1,10 @@
library("tidyverse")
+options(dplyr.summarise.inform = FALSE)
library("class")
library("rpart")
library("rpart.plot")
library("viridis")
+library("MLmetrics")
## Simulation Parameters:
## simkey {baseline,extended,hint,hintandextended}
@@ -24,16 +26,95 @@ nsimkey=4
nsimulations=nseed*nwakeupfor*nwireless*nsimkey # Must be 3200
## Load data
-data=read_csv("../CCGRID2022.csv")%>%distinct() # Note that in the data experiment wireless=="lora",seed==1,wakeupfor==60,simkey=="baseline" is present 2 times in the CSV file
-tmp_data_coverage=data%>%group_by(simkey,wireless,wakeupfor,seed)%>%mutate(coverage=sum(nDataRcv))%>%ungroup()%>%filter(isSender==1)%>%select(simkey,wireless,wakeupfor,seed,coverage)
-data_seed_isSender=data%>%group_by(simkey,wireless,wakeupfor,seed,isSender)%>%summarize(energy_mean=mean(energy))%>%
- left_join(tmp_data_coverage,by=c("simkey","wireless","wakeupfor","seed"))%>%
- mutate(efficiency=energy_mean/coverage)%>%
- ungroup()
+data=suppressMessages(read_csv("../CCGRID2022.csv"))%>%distinct() # Note that in the data experiment wireless=="lora",seed==1,wakeupfor==60,simkey=="baseline" is present 2 times in the CSV file
data_seed=data%>%group_by(simkey,wireless,wakeupfor,seed)%>%summarize(energy=sum(energy),coverage=sum(nDataRcv))%>%
mutate(efficiency=energy/coverage)%>%
ungroup()
+
+## Per-class F1 scores for the four simkey classes.
+## Returns a one-row data frame with columns f1_baseline, f1_extended,
+## f1_hint and f1_hintandextended; a class absent from `truth` (for which
+## MLmetrics::F1_Score yields NaN) is reported as 0.
+## NOTE(review): both truth and pred are subset to truth==c before calling
+## F1_Score, so the score for class c ignores false positives coming from
+## other classes — confirm this is the intended per-class metric rather
+## than a standard one-vs-rest F1.
+F1_Score2=function(truth, pred){
+  result=sapply(c("baseline","extended","hint","hintandextended"),function(c){
+    ## Restrict to the samples whose true label is the current class c
+    cur_truth=truth[truth==c]
+    cur_pred=pred[truth==c]
+    col=paste0("f1_",c)
+    score=F1_Score(cur_truth,cur_pred)
+    if(is.nan(score)){score=0} # class missing from this split -> report 0
+    list(tibble(!!col:=score)) # one-column tibble with a dynamic name
+  })
+  do.call("cbind",result) # bind the four one-column tibbles into one row
+}
+
+build_models=function(ignore_hint=TRUE){
+  ## Build a KNN and a decision-tree classifier predicting simkey from
+  ## (wireless, wakeupfor, energy, coverage), write the per-class F1 scores
+  ## averaged over 20 random 80/20 splits to figures/f1_scores_offline.csv,
+  ## and return the two predict functions.
+  ##
+  ## Args:
+  ##   ignore_hint: drop the "hint" scenario before training (default TRUE).
+  ## Returns: list(predict_knn=..., predict_tree=...), each mapping a tibble
+  ##   with the training columns (incl. a simkey column, which is ignored)
+  ##   to a character vector of predicted simkeys.
+
+  ## Shared data preparation: numeric encoding of wireless, optional
+  ## removal of the "hint" class.
+  wireless_map=c("lora"=1,"nbiot"=2)
+  prepare_data=function(){
+    data_ml=data_seed%>%select(-efficiency,-seed)%>%mutate(wireless=wireless_map[wireless])
+    if(ignore_hint){
+      data_ml=data_ml%>%filter(simkey!="hint")
+    }
+    data_ml
+  }
+  ## Random 80/20 train/test split.
+  ## BUGFIX: the original piped data_ml INTO suppressMessages(), i.e.
+  ## suppressMessages(data_ml, anti_join(train_set)); the anti_join ended up
+  ## as the lazily-evaluated `classes` argument, so test_set silently
+  ## equalled the whole data set (training rows included).
+  split_data=function(data_ml){
+    train_set=data_ml%>%sample_frac(0.8) # 80% of the data
+    test_set=suppressMessages(data_ml%>%anti_join(train_set)) # remaining 20%
+    list(train=train_set,test=test_set)
+  }
+
+  ## Prepare data for training the returned models
+  set.seed(1) # Reproducibility
+  data_ml=prepare_data()
+  train_set=split_data(data_ml)$train
+
+  ## KNN predict function (closes over the training split)
+  knn_fn=function(inputs){
+    as.vector(knn(train=train_set%>%select(-simkey),test=inputs%>%select(-simkey),cl=train_set$simkey,k=10))
+  }
+
+  ## Decision tree fitted once on the training split
+  tree=rpart(
+    simkey ~ wireless + wakeupfor + energy + coverage,
+    data=train_set,
+    method="class",
+    minsplit=60,
+    minbucket=1)
+  ## Tree predict function
+  tree_fn=function(inputs){
+    as.vector(predict(tree,newdata=inputs%>%select(-simkey),type="class"))
+  }
+
+  ## Build models
+  models=list(predict_knn=knn_fn,predict_tree=tree_fn)
+
+  ## Compute performances averaged over 20 independent 80/20 splits.
+  ## BUGFIX: the original called set.seed(1) inside this loop, so all 20
+  ## repetitions used the identical split and the average was meaningless;
+  ## the single set.seed(1) above keeps the whole run reproducible.
+  perfs=lapply(seq_len(20),function(i){
+    sets=split_data(data_ml)
+    train_set=sets$train
+    test_set=sets$test
+
+    ## KNN
+    knn_predictions=as.vector(knn(train=train_set%>%select(-simkey),test=test_set%>%select(-simkey),cl=train_set$simkey,k=10))
+    ## Decision tree
+    tree=rpart(
+      simkey ~ wireless + wakeupfor + energy + coverage,
+      data=train_set,
+      method="class",
+      minsplit=60,
+      minbucket=1)
+    tree_predictions=as.vector(predict(tree,newdata=test_set%>%select(-simkey),type="class"))
+
+    ## Per-class F1 scores for both models on this split
+    f1_knn=F1_Score2(test_set$simkey,knn_predictions)
+    f1_tree=F1_Score2(test_set$simkey,tree_predictions)
+    cbind(tibble(model=c("knn","tree")),rbind(f1_knn,f1_tree))
+  })
+  perfs=do.call("rbind",perfs)%>%mutate_if(is.numeric, ~round(.,digits=2))
+  perfs=perfs%>%group_by(model)%>%summarize(
+    f1_baseline=mean(f1_baseline),
+    f1_hint=mean(f1_hint),
+    f1_extended=mean(f1_extended),
+    f1_hintandextended=mean(f1_hintandextended))
+  write.csv(perfs,"figures/f1_scores_offline.csv",quote=FALSE,row.names=FALSE)
+
+  ## Return models
+  models
+}
+
generate_inputs=function(ignore_hint=FALSE) {
  ## Prepare data for training
set.seed(1) # Reproducibility
@@ -61,7 +142,7 @@ generate_inputs=function(ignore_hint=FALSE) {
minbucket=1)
tree_predictions=predict(tree,newdata=test_set%>%select(-simkey),type="class")
tree_cont_table=table(tree_predictions,test_set$simkey)
- tree_accuracy=round((sum(diag(tree_cont_table)/sum(rowSums(tree_cont_table))))*100)
+ tree_accuracy=(sum(diag(tree_cont_table)/sum(rowSums(tree_cont_table))))
tree_prop_table=round(prop.table(tree_cont_table),digits=2)
## Elbow plot
@@ -69,12 +150,12 @@ generate_inputs=function(ignore_hint=FALSE) {
knn_predictions=knn(train=train_set%>%select(-simkey),test=test_set%>%select(-simkey),cl=train_set$simkey,k=kvalue)
## KNN analysis
knn_cont_table=table(knn_predictions,test_set$simkey)
- knn_accuracy=round((sum(diag(knn_cont_table)/sum(rowSums(knn_cont_table))))*100)
+ knn_accuracy=(sum(diag(knn_cont_table)/sum(rowSums(knn_cont_table))))
knn_prop_table=round(prop.table(knn_cont_table),digits=2)
tibble(k=kvalue,accuracy=knn_accuracy)
})
elbow_data=do.call("rbind",elbow_data)
- ggplot(data=elbow_data,aes(k,accuracy))+geom_point()+geom_line()+ggtitle(paste("K-elbow for with NoHint to",as.character(ignore_hint)))
+ ggplot(data=elbow_data,aes(k,accuracy))+geom_point()+geom_line()+ggtitle(paste("K-elbow for with NoHint to",as.character(ignore_hint)))+ylim(c(0,1))
ggsave(paste0("figures/knn_elbow_NoHintIs",as.character(ignore_hint),".pdf"))
## Prints
@@ -126,5 +207,5 @@ generate_inputs=function(ignore_hint=FALSE) {
}
## Generate inputs
-generate_inputs(FALSE)
-generate_inputs(TRUE)
+#generate_inputs(FALSE)
+#generate_inputs(TRUE)