{
"id": {
},
"info": {
},
"costs": {
},
"livestock": {
"animals": {
"DCOW": {
"description": "Dairy cows and Bufallo (2013 and bef.)",
"columns": {
"LEVL": "LCOWBUFDAIR_AN_X",
"OV": "LCOWBUFDAIR_OV_X",
"CV": "LCOWBUFDAIR_OV_X + LCOWBUFDAIR_CV_X",
"PV": "LCOWBUFDAIR_PV_X",
"PN": "LCOWBUFDAIR_PN_X",
"SV": "LCOWBUFDAIR_SV_X",
"SN": "LCOWBUFDAIR_SN_X",
"FCV": "NA",
"FCN": "NA",
"FUV": "NA",
"FUN": "NA"
}
}
},
"production": {}
},
"subsidies": {
},
"crops": {
"SWHE": {
"columns": {
"UVSA": "NA"
}
},
"DWHE": {
"columns": {
"UVSA": "NA"
}
},
"RYEM": {
"columns": {
"UVSA": "NA",
"IRTA": "NA"
}
},
"BARL": {
"columns": {
"UVSA": "NA"
}
},
"OATS": {
"columns": {
"UVSA": "NA",
"IRTA": "NA"
}
},
"MAIZ": {
"columns": {
"UVSA": "NA"
}
},
"PARI": {
"columns": {
"UVSA": "NA"
}
},
"OCER": {
"columns": {
"LEVL": "CCEROTHNS_TA_X + CCEROTHS_TA_X",
"GROF": "CCEROTHNS_PRQ_X + CCEROTHS_PRQ_X",
"INTF": "CCEROTHNS_FUV_X + CCEROTHS_FUV_X",
"EAAP": "CCEROTHNS_TO_X + CCEROTHS_TO_X",
"UVAG": "ifelse( CCEROTHNS_PRQ_X + CCEROTHS_PRQ_X>0, (CCEROTHNS_SV_X+CCEROTHNS_FCV_X+CCEROTHNS_FUV_X+CCEROTHNS_CV_X-CCEROTHNS_OV_X + CCEROTHS_SV_X+CCEROTHS_FCV_X+CCEROTHS_FUV_X+CCEROTHS_CV_X-CCEROTHS_OV_X)/(CCEROTHNS_PRQ_X + CCEROTHS_PRQ_X), 0 )",
"UVSA": "NA",
"IRTA": "CCEROTHNS_IRA_X"
}
},
"RAPE": {
"columns": {
"UVSA": "NA",
"IRTA": "NA"
}
},
"SUNF": {
"columns": {
"UVSA": "NA",
"IRTA": "NA"
}
},
"SOYA": {
"columns": {
"UVSA": "NA",
"IRTA": "NA"
}
},
"OOIL": {
"columns": {
"UVSA": "NA",
"IRTA": "NA"
}
},
"PULS": {
"columns": {
"LEVL": "CCRPPROT_TA_X",
"GROF": "CCRPPROT_PRQ_X",
"INTF": "CCRPPROT_FUV_X",
"EAAP": "CCRPPROT_SV_X+CCRPPROT_FCV_X+CCRPPROT_FUV_X+CCRPPROT_CV_X-CCRPPROT_OV_X",
"UVAG": "ifelse( CCRPPROT_PRQ_X>0 , (CCRPPROT_SV_X+CCRPPROT_FCV_X+CCRPPROT_FUV_X+CCRPPROT_CV_X-CCRPPROT_OV_X)/CCRPPROT_PRQ_X, 0 )",
"UVSA": "NA",
"IRTA": "NA"
}
},
"POTA": {
"columns": {
"UVSA": "NA"
}
},
"SUGB": {
"columns": {
"UVSA": "NA"
}
},
"TEXT": {
"columns": {
"UVSA": "NA",
"IRTA": "NA"
}
},
"TOBA": {
"columns": {
"UVSA": "NA",
"IRTA": "NA"
}
},
"OIND": {
"columns": {
"LEVL": "CHOP_TA+CCRPINDOTHEXSED_TA_X-CCOTN_TA",
"GROF": "CHOP_PRQ+CCRPINDOTHEXSED_PRQ_X-CCOTN_PRQ",
"INTF": "CHOP_FUV+CCRPINDOTHEXSED_FUV_X-CCOTN_FUV",
"EAAP": "CHOP_SV+CHOP_FCV+CHOP_FUV+CHOP_CV-CHOP_OV + CCRPINDOTHEXSED_SV_X+CCRPINDOTHEXSED_FCV_X+CCRPINDOTHEXSED_FUV_X+CCRPINDOTHEXSED_CV_X-CCRPINDOTHEXSED_OV_X - CCOTN_SV+CCOTN_FCV+CCOTN_FUV+CCOTN_CV-CCOTN_OV",
"UVAG": "NA",
"UVSA": "NA",
"IRTA": "NA"
}
},
"OCRO": {
"columns": {
"LEVL": "CSEEDGRAS_TA_X+CSEEDSOTH_TA_X+CARAOTH_TA+CPERMUG_TA+CCRPPERMOTH_TA",
"GROF": "CSEEDGRAS_PRQ_X+CSEEDSOTH_PRQ_X+CARAOTH_PRQ+CPERMUG_PRQ_X+CCRPPERMOTH_PRQ_X+CYNG_PRQ",
"INTF": "CSEEDGRAS_FUV_X+CSEEDSOTH_FUV_X+CARAOTH_FUV+CPERMUG_FUV+CCRPPERMOTH_FUV+CYNG_FUV",
"EAAP": "CSEEDGRAS_SV_X+CSEEDGRAS_FCV_X+CSEEDGRAS_FUV_X+CSEEDGRAS_CV_X-CSEEDGRAS_OV_X + CSEEDSOTH_SV_X+CSEEDSOTH_FCV_X+CSEEDSOTH_FUV_X+CSEEDSOTH_CV_X-CSEEDSOTH_OV_X + CARAOTH_SV+CARAOTH_FCV+CARAOTH_FUV+CARAOTH_CV-CARAOTH_OV + CPERMUG_SV+CPERMUG_FCV+CPERMUG_FUV+CPERMUG_CV-CPERMUG_OV + CCRPPERMOTH_SV+CCRPPERMOTH_FCV+CCRPPERMOTH_FUV+CCRPPERMOTH_CV-CCRPPERMOTH_OV + CYNG_SV+CYNG_FCV+CYNG_FUV+CYNG_CV-CYNG_OV",
"UVAG": "ifelse( (CSEEDGRAS_PRQ_X+CSEEDSOTH_PRQ_X+CARAOTH_PRQ+CPERMUG_PRQ_X+CCRPPERMOTH_PRQ_X+CYNG_PRQ)>0 , (CSEEDGRAS_SV_X+CSEEDGRAS_FCV_X+CSEEDGRAS_FUV_X+CSEEDGRAS_CV_X-CSEEDGRAS_OV_X + CSEEDSOTH_SV_X+CSEEDSOTH_FCV_X+CSEEDSOTH_FUV_X+CSEEDSOTH_CV_X-CSEEDSOTH_OV_X + CARAOTH_SV+CARAOTH_FCV+CARAOTH_FUV+CARAOTH_CV-CARAOTH_OV + CPERMUG_SV+CPERMUG_FCV+CPERMUG_FUV+CPERMUG_CV-CPERMUG_OV + CCRPPERMOTH_SV+CCRPPERMOTH_FCV+CCRPPERMOTH_FUV+CCRPPERMOTH_CV-CCRPPERMOTH_OV + CYNG_SV+CYNG_FCV+CYNG_FUV+CYNG_CV-CYNG_OV)/(CSEEDGRAS_PRQ_X+CSEEDSOTH_PRQ_X+CARAOTH_PRQ+CPERMUG_PRQ_X+CCRPPERMOTH_PRQ_X+CYNG_PRQ) , 0 )",
"UVSA": "NA",
"IRTA": "NA"
}
},
"TOMA": {
"columns": {
"UVSA": "NA",
"IRTA": "NA"
}
},
"OVEG": {
"columns": {
"LEVL": "CVEGOF_TA+CVEGMG_TA+CVEGUG_TA-CTOMAT_TA",
"GROF": "CVEGOF_PRQ+CVEGMG_PRQ+CVEGUG_PRQ-CTOMAT_PRQ-CNONPERFRU_PRQ_X",
"INTF": "CVEGOF_FUV+CVEGMG_FUV+CVEGUG_FUV-CTOMAT_FUV-CNONPERFRU_FUV_X",
"EAAP": "CVEGOF_SV+CVEGOF_FCV+CVEGOF_FUV+CVEGOF_CV-CVEGOF_OV+CVEGMG_SV+CVEGMG_FCV+CVEGMG_FUV+CVEGMG_CV+CVEGMG_CV-CVEGMG_OV+CVEGUG_SV+CVEGUG_FCV+CVEGUG_FUV+CVEGUG_CV-CVEGUG_OV-CTOMAT_SV+CTOMAT_FCV+CTOMAT_FUV+CTOMAT_CV-CTOMAT_OV-CNONPERFRU_SV_X+CNONPERFRU_FCV_X+CNONPERFRU_FUV_X+CNONPERFRU_CV_X-CNONPERFRU_OV_X",
"UVAG": "NA",
"UVSA": "NA",
"IRTA": "NA"
}
},
"APPL": {
"columns": {
"LEVL": "CAPPLEPEAR_A_X",
"GROF": "CAPPLEPEAR_PRQ_X",
"INTF": "CAPPLEPEAR_FUV_X",
"EAAP": "CAPPLEPEAR_SV_X+CAPPLEPEAR_FCV_X+CAPPLEPEAR_FUV_X+CAPPLEPEAR_CV_X-CAPPLEPEAR_OV_X",
"UVAG": "ifelse( CAPPLEPEAR_PRQ_X>0, (CAPPLEPEAR_SV_X+CAPPLEPEAR_FCV_X+CAPPLEPEAR_FUV_X+CAPPLEPEAR_CV_X-CAPPLEPEAR_OV_X)/(CAPPLEPEAR_PRQ_X), 0 )",
"UVSA": "NA",
"IRTA": "NA"
}
},
"OFRU": {
"columns": {
"LEVL": "CSTNFRUT_A_X+CFRUTTRPL_TA+CNUT_TA+CSMLFRUTBER_A_X+CNONPERFRU_A_X",
"GROF": "CSTNFRUT_PRQ_X+CFRUTTRPL_PRQ+CNUT_PRQ+CSMLFRUTBER_PRQ_X+CNONPERFRU_PRQ_X",
"INTF": "CSTNFRUT_FUV_X+CFRUTTRPL_FUV+CNUT_FUV+CSMLFRUTBER_FUV_X+CNONPERFRU_FUV_X",
"EAAP": "CSTNFRUT_SV_X+CSTNFRUT_FCV_X+CSTNFRUT_FUV_X+CSTNFRUT_CV_X-CSTNFRUT_OV_X + CFRUTTRPL_SV+CFRUTTRPL_FCV+CFRUTTRPL_FUV+CFRUTTRPL_CV-CFRUTTRPL_OV + CNUT_SV+CNUT_FCV+CNUT_FUV+CNUT_CV-CNUT_OV + CSMLFRUTBER_SV_X+CSMLFRUTBER_FCV_X+CSMLFRUTBER_FUV_X+CSMLFRUTBER_CV_X-CSMLFRUTBER_OV_X + CNONPERFRU_SV_X+CNONPERFRU_FCV_X+CNONPERFRU_FUV_X+CNONPERFRU_CV_X-CNONPERFRU_OV_X",
"UVAG": "ifelse( (CSTNFRUT_PRQ_X+CFRUTTRPL_PRQ+CNUT_PRQ+CSMLFRUTBER_PRQ_X+CNONPERFRU_PRQ_X)>0 , (CSTNFRUT_SV_X+CSTNFRUT_FCV_X+CSTNFRUT_FUV_X+CSTNFRUT_CV_X-CSTNFRUT_OV_X + CFRUTTRPL_SV+CFRUTTRPL_FCV+CFRUTTRPL_FUV+CFRUTTRPL_CV-CFRUTTRPL_OV + CNUT_SV+CNUT_FCV+CNUT_FUV+CNUT_CV-CNUT_OV + CSMLFRUTBER_SV_X+CSMLFRUTBER_FCV_X+CSMLFRUTBER_FUV_X+CSMLFRUTBER_CV_X-CSMLFRUTBER_OV_X + CNONPERFRU_SV_X+CNONPERFRU_FCV_X+CNONPERFRU_FUV_X+CNONPERFRU_CV_X-CNONPERFRU_OV_X)/(CSTNFRUT_PRQ_X+CFRUTTRPL_PRQ+CNUT_PRQ+CSMLFRUTBER_PRQ_X+CNONPERFRU_PRQ_X) , 0 )",
"UVSA": "NA",
"IRTA": "NA"
}
},
"CITR": {
"columns": {
"UVSA": "NA",
"IRTA": "NA"
}
},
"TAGR": {
"columns": {
"UVSA": "NA",
"IRTA": "NA"
}
},
"OLIV": {
"columns": {
"UVSA": "NA",
"IRTA": "NA"
}
},
"TABO": {
"columns": {
"UVSA": "NA",
"IRTA": "NA"
}
},
"TWIN": {
"columns": {
"LEVL": "CVIN_A_X-CGRPTAB_TA",
"GROF": "CVIN_PRQ_X-CGRPTAB_PRQ",
"INTF": "CVIN_FUV_X-CGRPTAB_FUV",
"EAAP": "CVIN_SV_X+CVIN_FCV_X+CVIN_FUV_X+CVIN_CV_X-CVIN_OV_X - CGRPTAB_SV+CGRPTAB_FCV+CGRPTAB_FUV+CGRPTAB_CV-CGRPTAB_OV",
"UVAG": "ifelse( (CVIN_PRQ_X-CGRPTAB_PRQ)>0 , (CVIN_SV_X+CVIN_FCV_X+CVIN_FUV_X+CVIN_CV_X-CVIN_OV_X - CGRPTAB_SV+CGRPTAB_FCV+CGRPTAB_FUV+CGRPTAB_CV-CGRPTAB_OV)/(CVIN_PRQ_X-CGRPTAB_PRQ) , 0 )",
"UVSA": "NA",
"IRTA": "NA"
}
},
"NURS": {
"columns": {
"LEVL": "CNURS_TA",
"GROF": "CNURS_PRQ_X",
"INTF": "CNURS_FUV",
"EAAP": "CNURS_SV+CNURS_FCV+CNURS_FUV+CNURS_CV-CNURS_OV",
"UVAG": "ifelse( CNURS_PRQ_X>0 , (CNURS_SV+CNURS_FCV+CNURS_FUV+CNURS_CV-CNURS_OV)/(CNURS_PRQ_X) , 0 ) ",
"UVSA": "NA",
"IRTA": "NA"
}
},
"FLOW": {
"columns": {
"LEVL": "ifelse( (CFLWOUT_TA+CFLWUG_TA)>0,CFLWOUT_TA+CFLWUG_TA,CFLWB_TA+CFLWCUT_TA+CFLW_TA)",
"GROF": "ifelse( (CFLWOUT_PRQ_X+CFLWUG_PRQ_X)>0,CFLWOUT_PRQ_X+CFLWUG_PRQ_X,CFLWB_PRQ_X+CFLWCUT_PRQ_X+CFLW_PRQ_X)",
"INTF": "ifelse( (CFLWOUT_FUV+CFLWUG_FUV)>0,CFLWOUT_FUV+CFLWUG_FUV,CFLWB_FUV+CFLWCUT_FUV+CFLW_FUV)",
"EAAP": "ifelse( (CFLWOUT_TO + CFLWUG_TO)>0 ,CFLWOUT_TO + CFLWUG_TO, CFLWB_TO+ CFLWCUT_TO+CFLW_TO)",
"UVAG": "ifelse( (CFLWOUT_TO + CFLWUG_TO)>0 , ifelse( (CFLWOUT_PRQ_X+CFLWUG_PRQ_X)>0 , (CFLWOUT_TO + CFLWUG_TO)/(CFLWOUT_PRQ_X+CFLWUG_PRQ_X) , 0) , ifelse( (CFLWB_PRQ_X+CFLWCUT_PRQ_X+CFLW_PRQ_X)>0 , (CFLWB_TO+ CFLWCUT_TO+CFLW_TO)/(CFLWB_PRQ_X+CFLWCUT_PRQ_X+CFLW_PRQ_X) , 0) )",
"IRTA": "NA"
}
},
"MAIF": {
"columns": {
"LEVL": "CFODMZ_A",
"UVSA": "NA",
"IRTA": "NA"
}
},
"ROOF": {
"columns": {
"LEVL": "CFODRTBR_A",
"UVSA": "NA",
"IRTA": "NA"
}
},
"PGRA": {
"columns": {
"LEVL": "CGRSXRG_A",
"UVSA": "NA",
"IRTA": "NA"
}
},
"RGRA": {
"columns": {
"LEVL": "CRG_A+CGRSNOUSESUB_A"
}
},
"OFAR": {
"columns": {
"LEVL": "CGRSTMP_A+CSILAGECEROTH_A_X+CFODOTH_A_X",
"GROF": "CGRSTMP_PRQ+CSILAGECEROTH_PRQ_X+CFODOTH_PRQ_X",
"INTF": "CGRSTMP_FUV+CSILAGECEROTH_FUV_X+CFODOTH_FUV_X",
"EAAP": "CGRSTMP_TO+CSILAGECEROTH_TO_X+CFODOTH_TO_X",
"UVAG": "ifelse( (CGRSTMP_PRQ+CSILAGECEROTH_PRQ_X+CFODOTH_PRQ_X)>0, (CGRSTMP_TO+CSILAGECEROTH_TO_X+CFODOTH_TO_X)/(CGRSTMP_PRQ+CSILAGECEROTH_PRQ_X+CFODOTH_PRQ_X), 0 )",
"UVSA": "NA",
"SHARE": "ifelse( (CGRSTMP_SV+CSILAGECEROTH_SV_X+CFODOTH_SV_X)>0, CGRSTMP_FUV+CSILAGECEROTH_FUV_X+(CFODOTH_FUV_X/CGRSTMP_SV)+CSILAGECEROTH_SV_X+CFODOTH_SV, 0)",
"IRTA": "NA"
}
},
"SETA": {
"columns": {
"LEVL": "CFLNDNOSUB_A + CFLNDSUB_A",
"UVSA": "NA",
"IRTA": "NA"
}
},
"FORE": {
"description": "Forest land",
"columns": {
"LEVL": "CWDED_A",
"GROF": "CWDED_PRQ_X",
"INTF": "CWDED_FUV",
"EAAP": "CWDED_TO",
"UVAG": "ifelse( CWDED_PRQ_X>0 , CWDED_TO/CWDED_PRQ_X , 0)",
"IRTA": "NA"
}
},
"CCER": {
"description": "Catch crops cereals (?)",
"columns": {
"LEVL": "CWHTC_TA-CWHTC_A + CWHTD_TA-CWHTD_A + CRYE_TA-CRYE_A + CBRL_TA-CBRL_A + COAT_TA-CBRL_A + COAT_TA-COAT_A + COAT_TA-CCEROTHS_A_X + CMZ_TA-CMZ_A + CRICE_TA-CRICE_A + CCEROTHNS_TA_X-CCEROTHNS_A_X"
}
},
"CPUL": {
"description": "Catch crops pulses (?) ",
"columns": {
"LEVL": "CCRPPROT_TA_X-CCRPPROT_A_X"
}
},
"COCR": {
"description": "Catch crops other crops (?)",
"columns": {
"LEVL": "CPOT_TA-CPOT_A + CSUGBT_TA-CSUGBT_A + CCRPOIL_TA_X-CCRPOIL_A_X + CHOP_TA-CHOP_A + CTOBAC_TA-CTOBAC_A"
}
}
}
}
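# A minimal sketch (not part of the package) of how a raw_str_map column expression,
# for example the PULS "UVAG" formula above, could be evaluated against a data.table
# whose columns are the raw FADN variables. The toy values below are placeholders and
# convert.to.fadn.str.rds may implement this differently.
library(data.table)
raw = data.table(CCRPPROT_PRQ_X = c(10, 0), CCRPPROT_SV_X = c(5, 5),
                 CCRPPROT_FCV_X = c(1, 1), CCRPPROT_FUV_X = c(2, 2),
                 CCRPPROT_CV_X  = c(3, 3), CCRPPROT_OV_X  = c(1, 1))
expr = "ifelse( CCRPPROT_PRQ_X>0 , (CCRPPROT_SV_X+CCRPPROT_FCV_X+CCRPPROT_FUV_X+CCRPPROT_CV_X-CCRPPROT_OV_X)/CCRPPROT_PRQ_X, 0 )"
raw[, UVAG := eval(parse(text = expr))]  # unit value per farm: 1 for the first farm, 0 for the second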
# Calculate standard results
#
#' Aggregates columns for each farm using a formula
#'
#' @param data a fadn.container, containing all tables
#' @param SEdata a data.table of already calculated SE
#' @param formulaString the formula string to use for aggregation
#' @param aggregator the function used to aggregate rows per farm (default: sum)
#' @param onlyValue if TRUE (default), return only the value vector; otherwise return the full result data.frame
#' @return a data.frame [FID, value] or, when onlyValue=TRUE, only the value vector
#' @export
#' @examples
#' #definition of formula SE610+SE615+SE624-SE626
#' formula=list(add=c("SE610","J830(2)","#289","#267..270"),substract=c("SE626","M632..634(2)"))
#' list(add=c("#48","#49","#50"),substract=list())
#'
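#' # A minimal call sketch (not run; `cont` and `se` are placeholders for a
#' # fadn.container and a data.table of pre-computed SE variables):
#' \dontrun{
#' getFormulaResult(data = cont, SEdata = se, formulaString = "SE610+SE615+SE624-SE626")
#' }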
#
getFormulaResult=function(data,SEdata,formulaString,aggregator=sum,onlyValue=T) {
if(! ("fadn.container"%in%class(data))) {stop('data is not a fadn.container class')}
result=data.frame(FID=data$tableAI$FID,value=rep(0,nrow(data$tableAI)));
formula=analyzeFormula(formulaString);
for(f in c(formula$add,formula$substract)) {
#warning(f);
if(sum(grepl(f,formula$add,fixed=T))>0) {doAdd=T;} else {doAdd=F;}
if(grepl("^SE.",f)) { #----- Calculate SE section -------------
if(! (f %in% names(SEdata))) {
warning(paste(f," was not found on SEdata. It was ommited from the calculations.",sep=""))
}
else {
result[,f]=SEdata[,f,with=F];
if(doAdd) {result$value=result$value+result[,f];}
else {result$value=result$value-result[,f];}
}
}
else if(grepl("^#.",f)) { #----- Calculate fixed columns section -------------
#warning("^#.");
if(grepl("\\.\\.",f)) { #if a range of vars is given
#warning("range")
reg="^.(.+)(\\.\\.)(.+)$";
from=as.numeric(gsub(reg,"\\1",f));
to=as.numeric(gsub(reg,"\\3",f));
vars=c(seq(from,to));
}
else {
#warning("single")
reg="^.(.+)$";
vars=gsub(reg,"\\1",f);
}
for(v in vars) {
#print(as.numeric(v))
colName=paste("V",v,sep="");
#print(colName);
result[,colName]=data$tableAI[,as.numeric(v),with=FALSE];
#print(data$tableAI[,as.numeric(v),with=FALSE])
if(doAdd) {result$value=result$value+result[,colName];}
else {result$value=result$value-result[,colName];}
}
}
else if(grepl("^[K]",f)) { #----- Calculate K table section -------------
# warning("^[K]")
table=gsub('^(.).+$','\\1',f)
dta=as.data.frame(data$tableK)
# print(dta)
#find col range
col_raw=gsub('^(.+?)(\\()(.+)(\\))$','\\3',f)
# print(col_raw); print(grepl("\\|",col_raw));
if(!grepl("\\:",col_raw)) { #check if there is conditional
col=col_raw;
cond.check=F
}else { #if there is conditional
cond.check=T
col=gsub('(.+?)(\\:)(.+)$','\\1',col_raw);
cond_raw=gsub('(.+?)(\\:)(.+)$','\\3',col_raw);
cond.data=list();
cond.data.raw=strsplit(cond_raw,"&",fixed = T)[[1]];
for(cond.cur in cond.data.raw) {
cond.data[[cond.cur]]=character();
cond.col.val=strsplit(cond.cur,"=",fixed = T)[[1]]
cond.data[[cond.cur]]["col"]=cond.col.val[1]
cond.data[[cond.cur]]["val"]=cond.col.val[2]
if(grepl("\\(",cond.col.val[2])) {
cond.data[[cond.cur]]["val_type"]="c"
} else {
cond.data[[cond.cur]]["val_type"]="val"
}
}
# print(cond_raw);print(cond_col);print(cond_val)
} #end if conditional exists
if(grepl("\\.\\.",col)) {
reg='^(.+?)(\\.\\.)(.+)$'
from=as.numeric(gsub(reg,"\\1",col));
to=as.numeric(gsub(reg,"\\3",col));
cols=c(seq(from,to));
} else {
cols=c(as.numeric(col));
}
# print(cols)
#find var range
if(grepl("^(.+?)(\\.\\.)(.+?)\\(",f)) { #if a range of vars is given
reg="^.(.+?)(\\.\\.)(.+?)\\((.+)$";
from=as.numeric(gsub(reg,"\\1",f));
to=as.numeric(gsub(reg,"\\3",f));
vars=c(seq(from,to))
}
else {
reg="^.(.+?)(\\(.+\\))$";
vars=gsub(reg,"\\1",f);
}
# print(vars)
for(v in vars) {
for(c in cols) {
colName=paste(table,v,"(",c,")",sep="");
# print(colName);
if(cond.check==T) {
cmd1="(dta[,1]==v";
cmd2=""
for(cond.cur in cond.data) {
# print(cond.cur)
cmd2=paste0(cmd2," & dta[,",cond.cur["col"],"]",sep="" )
if(cond.cur["val_type"]=="val") {
cmd2=paste0(cmd2,"==",cond.cur["val"],sep="")
}else {
cmd2=paste0(cmd2,"%in%c",cond.cur["val"],sep="")
}
}
cmd=paste0(cmd1,cmd2,")",sep="");
# print(cmd)
p=eval(parse(text=cmd))
#p=(dta[,1]==v & dta[,as.numeric(cond_col)]==as.numeric(cond_val));
# print(sum(p))
# str(dta)
# print(dta[p,])
}
else {
p=(dta[,1]==v);
}
if(sum(p)>0) {
# print(c)
tk=aggregate(dta[p,c],by=list(dta[p,"FID"]),aggregator)
}
else {
tk=data.frame(FID=seq(1,nrow(data$tableAI)),value=rep(0,nrow(data$tableAI)));
}
names(tk)=c("FID",colName);
# print(head(tk))
result=merge(result,tk,all.x=T,by="FID");
result[is.na(result[,colName]),colName]=0;
if(doAdd) {result$value=result$value+result[,colName];}
else {result$value=result$value-result[,colName];}
} #end for cols
} #end for vars
}
else if(grepl("^[L,M,N]",f)) { #----- Calculate LMN section -------------
# warning("^[L,M,N]")
table=gsub('^(.).+$','\\1',f)
dta=as.data.frame(data$tableJ$tens)
#find col range
col=gsub('^(.+)(\\()(.+)(\\))$','\\3',f)
if(grepl("\\.\\.",col)) {
reg='^(.+)(\\.\\.)(.+)$'
from=as.numeric(gsub(reg,"\\1",col));
to=as.numeric(gsub(reg,"\\3",col));
cols=c(seq(from,to));
}
else {
cols=c(as.numeric(col));
}
# print(cols)
#find var range
if(grepl("^(.+)(\\.\\.)(.+)\\(",f)) { #if a range of vars is given
reg="^.(.+)(\\.\\.)(.+)(\\(.+\\))";
from=as.numeric(gsub(reg,"\\1",f));
to=as.numeric(gsub(reg,"\\3",f));
vars=c(seq(from,to))
}
else {
reg="^.(.+)(\\(.+\\))$";
vars=gsub(reg,"\\1",f);
}
# print(vars)
for(v in vars) {
for(c in cols) {
colName=paste(table,v,"(",c,")",sep="");
# warning(colName)
p=(dta[,3]==v);
# warning(sum(p))
if(sum(p)>0) {
tk=aggregate(dta[p,c],by=list(dta[p,"FID"]),aggregator)
}
else {
tk=data.frame(FID=seq(1,nrow(data$tableAI)),value=rep(0,nrow(data$tableAI)));
}
names(tk)=c("FID",colName);
# print(head(tk))
result=merge(result,tk,all.x=T,by="FID");
result[is.na(result[,colName]),colName]=0;
if(doAdd) {result$value=result$value+result[,colName];}
else {result$value=result$value-result[,colName];}
} #end for cols
} #end for vars
}
else if(grepl("^J",f)) { #----- Calculate J section -------------
table="J";
dta=data.frame(data$tableJ$tens)
#find var range
if(grepl("^(.+)(\\.\\.)(.+)\\(",f)) { #if a range of vars is given
reg="^.(.+)(\\.\\.)(.+)(\\(.+\\))";
from=as.numeric(gsub(reg,"\\1",f));
to=as.numeric(gsub(reg,"\\3",f));
vars=c(seq(from,to))
}
else {
reg="^.(.+)(\\(.+\\))$";
vars=gsub(reg,"\\1",f);
}
# print(vars)
for(v in vars) {
colName=paste(table,v,sep="");
# print(colName)
p=(dta[,"CODE"]==v);
# print(sum(p))
if(sum(p)>0) {
tk=aggregate(dta[p,"AMOUNT"],by=list(dta[p,"FID"]),aggregator)
}
else {
tk=data.frame(FID=seq(1,nrow(data$tableAI)),value=rep(0,nrow(data$tableAI)));
}
names(tk)=c("FID",colName);
# print(head(tk))
result=merge(result,tk,all.x=T,by="FID");
result[is.na(result[,colName]),colName]=0;
if(doAdd) {result$value=result$value+result[,colName];}
else {result$value=result$value-result[,colName];}
} #end for vars
} #end if J
}
attr(result,"formula")=formulaString;
if(onlyValue) {return(result$value);}
else{return(result);}
}
#' Disaggregates a string formula to a list(add=c("SE610","J830(2)","#289","#267..270"),substract=c("SE626","M632..634(2)"))
#'
#' @param formula a formula string, see examples
#' @return list(add=c(),substract=())
#' @export
#' @examples
#' formula="K120..148(7)+K120..148(8)+K120..148(9)+K120..148(10)-K120..148(6)"
#' formula="#48+#49+#50"
#
analyzeFormula=function(formula) {
result=list(add=character(0),substract=character(0));
formula=gsub("\\s","",formula);
s1=strsplit(formula,"(?<=[-+])|(?=[-+])",perl=T)[[1]]
doAdd=T;
for(s in s1) {
if(grepl("\\+",s)) {
doAdd=T;
}
else if(grepl("\\-",s)) {
doAdd=F;
}
else {
if(doAdd){result$add=c(result$add,s)}
else {result$substract=c(result$substract,s)}
}
}
return(result);
}
#Maps a (table, heading) reference to a range of fixed column indices (presumably of the AI table); only tables E and D are mapped, so this helper is incomplete
translateTableCell=function(table,heading,col) {
theMap=list();
theMap$E=list("51"=c(232:233),
"52"=c(234:236),
"53"=c(237:239),
"54"=c(240:242),
"55"=c(243:245),
"56"=c(246:248),
"57"=c(249:251),
"58"=c(252:254)
);
theMap$D=list("22"=c(86:90),
"23"=c(91:95),
"24"=c(96:100),
"25"=c(101:105),
"26"=c(106:110),
"27"=c(111:115),
"28"=c(116:120),
"29"=c(121:125),
"30"=c(126:130),
"31"=c(131:135),
"32"=c(136:140),
"33"=c(141:145),
"34"=c(146:150),
"35"=c(151:155),
"36"=c(156:160),
"37"=c(161:165),
"38"=c(166:170),
"39"=c(171:175),
"40"=c(176:180),
"41"=c(181:185),
"42"=c(186:190),
"43"=c(191:195),
"44"=c(196:200),
"45"=c(201:205),
"46"=c(206:210),
"47"=c(211:215),
"48"=c(216:220),
"49"=c(221:225),
"50"=c(NULL,227,NULL,229)
);
  # presumed lookup of the mapped column indices; `col` selects within the mapped range
  return(theMap[[table]][[as.character(heading)]][col]);
}
#This file contains functions related to managing data.dir (create, set, read contents, etc.)
#' Creates a data.dir
#'
#' @param folder.path the full path of the folder to create as a data.dir
#' @param raw_str_map.file the full filepath of the raw_str_map file that is copied into the data.dir
#' @param metadata a JSON string with metadata describing the data.dir (written to fadnUtils.metadata.json)
#'
#' @return TRUE if created successfully; FALSE otherwise. Returned invisibly.
#' @export
#'
#' @examples
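#' # A minimal usage sketch (not run; the path is a placeholder):
#' \dontrun{
#' create.data.dir(folder.path = "path/to/my.data.dir")
#' }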
create.data.dir = function(folder.path,
metadata = "{\n'description': 'No Description Provided',\n'created-by':'',\n'created-at':''\n}") {
if(file.exists(folder.path)) {
#if it is already a data.dir, exit
if(check.data.dir.structure(folder.path)) {
cat("This is already a data.dir structure. Doing nothing.\n")
return(invisible(FALSE));
}
}
else { #if folder does not exist, create it
if(!dir.create(folder.path)) {print("Could not create folder."); return(invisible(FALSE));}
}
#create fadnUtils.metadata ----
cat(metadata, file=paste0(folder.path,"/fadnUtils.metadata.json"))
#create DIR>csv ----
dir.create(paste0(folder.path,"/csv"))
#create DIR>rds ----
dir.create(paste0(folder.path,"/rds"))
#create DIR>spool ----
dir.create(paste0(folder.path,"/spool"))
cat(
"In this folder you can save project related files",
file = paste0(folder.path,"/spool/readme.txt")
)
return(invisible(TRUE));
}
#' Sets the data.dir
#'
#' @param new.data.dir the full path of the folder that will serve as data.dir, without a trailing slash "/"
#'
#' @return TRUE if the data.dir was set successfully; FALSE otherwise. Returned invisibly.
#' @export
#'
#' @examples
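#' # A minimal usage sketch (not run; the path is a placeholder):
#' \dontrun{
#' set.data.dir("path/to/my.data.dir")
#' }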
set.data.dir = function(new.data.dir) {
#check that it is a valid data.dir ----
if(!check.data.dir.structure(new.data.dir, silent = F)) {
cat("Not a valid data.dir. cannot set the folder provided.\n");
return(invisible(FALSE));
}
#set option for fadnUtils.data.dir ----
options("fadnUtils.data.dir" = new.data.dir)
#load stored.rds.env ----
stored.rds.env.path = paste0(new.data.dir,"/stored.rds.data.RData")
if(file.exists(stored.rds.env.path)) {
load(stored.rds.env.path,envir = env.stored.rds)
}
return(invisible(TRUE));
}
#' Gets the data.dir
#'
#' data.dir is the folder where the data is stored.
#' The package creates two subfolders:
#' csv = location to store the csv files from DG-AGRI (fadn.raw.csv)
#' rds = location to store rds files (fadn.raw.rds, fadn.str.rds, etc.)
#'
#' @return the value of getOption("fadnUtils.data.dir"), or NULL if it is not set
#' @export
#'
#' @examples
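#' # Returns the currently set data.dir, or NULL if none is set (not run):
#' \dontrun{
#' get.data.dir()
#' }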
get.data.dir = function() {
ret = getOption("fadnUtils.data.dir")
if(is.null(ret)) {
return(NULL)
} else {
if(ret=="") {
return(NULL)
} else {
return(ret)
}
}
}
#' Show the contents of data.dir
#'
#' @param data.dir a specific directory whose contents to show; if NULL, the fadnUtils.data.dir option is used
#' @param return.list if T, returns a list; otherwise prints the results
#'
#' @return a list containing the description of the data.dir, a DT of the available fadn.raw.rds
#' (COUNTRY-YEAR) and, per extraction dir, a DT of the available fadn.str.rds (COUNTRY-YEAR)
#' @export
#'
#' @examples
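#' # Print the contents of the currently set data.dir, or capture them as a list (not run):
#' \dontrun{
#' show.data.dir.contents()
#' contents = show.data.dir.contents(return.list = TRUE)
#' }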
show.data.dir.contents = function(data.dir=NULL, return.list=F) {
if(is.null(data.dir)) {
data.dir=get.data.dir()
}
if(!check.data.dir.structure(data.dir)) {
warning("Not a valid data.dir. Exiting ....")
return(NULL)
}
#store the results in a list
ret=list()
#get description
ret[['description']]=paste(readLines(paste0(data.dir,'/fadnUtils.metadata.json'),warn = F),collapse = " ")
#get raw data
ret[["raw"]] = get.available.fadn.raw.rds()
#get extracted data
ret[["extractions"]]=list();
extr.dirs = list.dirs(path = paste0(data.dir,"/rds"), full.names = F, recursive = F)
for(d in extr.dirs) {
ret[["extractions"]][[d]]=list()
ret[["extractions"]][[d]][["contents"]] = get.available.fadn.str.rds(extract_dir = d)
}
if(return.list==T) {
return(invisible(ret));
} else {
cat("\n","Description: \n", ret[['description']])
cat("\n\nRaw data: \n")
print(dcast(ret[["raw"]] ,COUNTRY~YEAR,value.var = "COUNTRY",fun.aggregate = length))
cat("\n\nExtracted data : \n")
for(d in extr.dirs) {
cat("\n---- Extracted dir: ", d, "\n")
if(nrow(ret[["extractions"]][[d]][["contents"]])>0 ) {
print(dcast(ret[["extractions"]][[d]][["contents"]] ,COUNTRY~YEAR,value.var = "COUNTRY",fun.aggregate = length))
} else {
cat("No data present")
}
}
cat("\n\n")
}
}
#' Checks if the structure of the fadnUtils.data.dir is ok
#'
#' @param data.dir the directory to check; if NULL, the fadnUtils.data.dir option is used
#' @param silent if TRUE, do not print any message
#'
#' @return TRUE if the structure is OK; FALSE otherwise. Returned invisibly.
#' @export
#'
#' @examples
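#' # Check the currently set data.dir and print any problems found (not run):
#' \dontrun{
#' check.data.dir.structure(silent = FALSE)
#' }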
check.data.dir.structure = function(data.dir=NULL, silent=T) {
messages = c()
if(is.null(data.dir)) {data.dir = get.data.dir()}
if(!file.exists(data.dir)) {
messages=c(messages,"Folder provided as data.dir does not exit.");
}
if(!file.exists(paste0(data.dir,"/fadnUtils.metadata.json"))) {
messages=c(messages,"Problem with data.dir: fadnUtils.metadata.json does not exist.")
}
if(!file.exists(paste0(data.dir,"/csv"))) {
messages=c(messages,"Problem with data.dir: 'csv' directory does not exist.")
}
if(!file.exists(paste0(data.dir,"/rds"))) {
messages=c(messages,"Problem with data.dir: 'rds' directory does not exist.")
}
if(length(messages)==0) {return(invisible(TRUE))}
if(!silent) {
cat(messages,sep = "\n")
}
return(invisible(FALSE))
}
#' Returns the available YEAR-COUNTRY fadn.raw.rds
#'
#' @param data.dir a specific data.dir; if NULL, the fadnUtils.data.dir option is used
#' @return a DT of the available YEAR-COUNTRY fadn.raw.rds
#'
#' @export
#'
#' @examples
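#' # List the YEAR-COUNTRY combinations available as fadn.raw.rds in the current data.dir (not run):
#' \dontrun{
#' get.available.fadn.raw.rds()
#' }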
get.available.fadn.raw.rds = function(data.dir=NULL) {
if(is.null(data.dir)) {data.dir=get.data.dir()}
if(is.null(data.dir)) {
warning("Either provide explicitly a fadnUtils.data.dir to the function orfirst to set the fadnUtils.data.dir using set.data.dir function. Exiting ....")
return(FALSE)
}
rds.dir = paste0(data.dir,"/rds/")
rds.avail.files = list.files(rds.dir,pattern = "fadn.raw.*.rds")
pattern = "fadn[.]raw[.](\\d*)[.](\\S*)[.]rds"
fadn.raw.rds.avail = data.table(
YEAR = gsub(pattern,"\\1",rds.avail.files),
COUNTRY = sub(pattern,"\\2",rds.avail.files)
)
return(fadn.raw.rds.avail)
}
#' Returns the available YEAR-COUNTRY fadn.str.rds, for each str.folder
#'
#' @param data.dir a specific data.dir; if NULL, the fadnUtils.data.dir option is used
#' @param extract_dir the name of the extraction dir
#' @return a DT of the available YEAR-COUNTRY fadn.str.rds
#'
#' @export
#'
#' @examples
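#' # List the YEAR-COUNTRY combinations of one extraction dir (the dir name is a placeholder; not run):
#' \dontrun{
#' get.available.fadn.str.rds(extract_dir = "my_extraction")
#' }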
get.available.fadn.str.rds = function(data.dir=NULL,extract_dir) {
if(is.null(data.dir)) {data.dir=get.data.dir()}
if(is.null(data.dir)) {
warning("Either provide explicitly a fadnUtils.data.dir or set the fadnUtils.data.dir using set.data.dir function. Exiting ....")
return(FALSE)
}
rds.dir = paste0(data.dir,"/rds/",extract_dir)
rds.avail.files = list.files(rds.dir,pattern = "fadn.str.*.rds")
pattern = "fadn[.]str[.](\\d+)[.](\\S+)[.]rds"
fadn.str.rds.avail = data.table(
YEAR = gsub(pattern,"\\1",rds.avail.files),
COUNTRY = gsub(pattern,"\\2",rds.avail.files)
)
return(fadn.str.rds.avail)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/manage_data_dir.R
\name{show.data.dir.contents}
\alias{show.data.dir.contents}
\title{Show the contents of data.dir}
\usage{
show.data.dir.contents(data.dir = NULL, return.list = F)
}
\arguments{
\item{data.dir}{a specific directory to show contents, otherwise it will read the fadnUtils.data.dir}
\item{return.list}{if T, returns a list, otherwise print the results}
}
\value{
}
\description{
Show the contents of data.dir
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{write.excel}
\alias{write.excel}
\title{Utility to copy data to clipboard for pasting to Excel}
\usage{
write.excel(d, getRownames = F, ...)
}
\arguments{
\item{d}{the data to copy}
\item{getRownames}{set to T to also copy the row.names}
\item{...}{any other parameter for passing to write.table}
}
\value{
nothing
}
\description{
Utility to copy data to clipboard for pasting to Excel
}
\examples{
write.excel(d);
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/manage_data_dir.R
\name{create.data.dir}
\alias{create.data.dir}
\title{Creates a data.dir}
\usage{
create.data.dir(
folder.path,
metadata = "{\\n'description': 'No Description Provided',\\n'created-by':'',\\n'created-at':''\\n}"
)
}
\arguments{
\item{metadata}{}
}
\value{
TRUE if created successfully; FALSE otherwise. Returned invisibly.
}
\description{
Creates a data.dir
}
#' Utility to copy data to clipboard for pasting to Excel
#'
#' @param d the data to copy
#' @param getRownames set to T to also copy the row.names
#' @param ... any other parameter for passing to write.table
#' @return nothing
#' @export
#' @examples
#' write.excel(d);
write.excel=function(d,getRownames=F,...) {write.table(d,"clipboard-65000",sep="\t",row.names = getRownames,...)}
#' Updates selected elements of a DT with new values given in melted format
#'
#' The user provides data.new as {id, variable, value}. The function overwrites the matching id-variable cells of data.old with the new values.
#'
#' @param data.old The DT to update
#' @param data.new The data to insert. It must have three columns: {id,variable,new value}. E.g. data.new=data.table("id"=c(810001100105),"variable"=c("AASBIO_CV"),value=c(999999))
#'
#'
#' @return a DT with the updated values
#' @export
#'
#' @examples
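#' # A minimal worked sketch on toy data (not real FADN records):
#' library(data.table)
#' old = data.table(id = c(1, 2), AASBIO_CV = c(10, 20))
#' upd = data.table(id = 2, variable = "AASBIO_CV", value = 999)
#' update_elements.DT(old, upd)  # the row with id 2 now holds AASBIO_CV = 999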
update_elements.DT = function(data.old,data.new) {
id.var = names(data.new)[1]
vars.to.replace = unique(data.new$variable)
for(v in vars.to.replace) {
setkeyv(data.old,id.var)
data.col.new = merge(
data.old[,mget(c(id.var,v))],
data.new[variable==v,mget(c(id.var,"value"))],
all.x=T
)
data.col.new[,value.merged:=ifelse(is.na(value),eval(parse(text = v)),value)]
data.old[,(v):=data.col.new$value.merged]
}
return(data.old)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/raw_str_map.R
\name{check.raw_str_map}
\alias{check.raw_str_map}
\title{Checks if the definitions of a raw_str_map are compatible with a fadn.raw.rds for a certain year and country}
\usage{
check.raw_str_map(raw_str_map.file, fadn.country = NA, fadn.year = NA)
}
\arguments{
\item{raw_str_map.file}{The full filepath of the raw_str_map}
}
\value{
}
\description{
Checks if all values are actual columns of the fadn.raw.rds file
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/manage_data_dir.R
\name{set.data.dir}
\alias{set.data.dir}
\title{Sets the data.dir}
\usage{
set.data.dir(new.data.dir)
}
\arguments{
\item{new.data.dir}{the full path of the folder that will serve as data.dir, without a trailing slash "/"}
}
\value{
TRUE if the data.dir was set successfully; FALSE otherwise. Returned invisibly.
}
\description{
Sets the data.dir
}
^.*\.Rproj$
^\.Rproj\.user$
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/standardResults.R
\name{analyzeFormula}
\alias{analyzeFormula}
\title{Disaggregates a string formula to a list(add=c("SE610","J830(2)","#289","#267..270"),substract=c("SE626","M632..634(2)"))}
\usage{
analyzeFormula(formula)
}
\arguments{
\item{formula}{a formula string, see examples}
}
\value{
list(add=c(),substract=())
}
\description{
Disaggregates a string formula to a list(add=c("SE610","J830(2)","#289","#267..270"),substract=c("SE626","M632..634(2)"))
}
\examples{
formula="K120..148(7)+K120..148(8)+K120..148(9)+K120..148(10)-K120..148(6)"
formula="#48+#49+#50"
}
env.stored.rds <- new.env(parent = emptyenv());
.onAttach <- function(libname, pkgname) {
# to show a startup message
packageStartupMessage("fadnUtils is loaded.")
}
.onLoad <- function(libname, pkgname) {
}
# Generated by roxygen2: do not edit by hand
export(analyzeFormula)
export(check.data.dir.structure)
export(check.raw_str_map)
export(convert.to.fadn.raw.rds)
export(convert.to.fadn.str.rds)
export(create.data.dir)
export(delete.fadn.raw)
export(delete.fadn.str)
export(get.available.fadn.raw.rds)
export(get.available.fadn.str.rds)
export(get.data.dir)
export(getFormulaResult)
export(grep.columns.in.raw.rds)
export(import.fadn.csv)
export(load.fadn.raw.rds)
export(load.fadn.str.rds)
export(raw_str_map.merge)
export(set.data.dir)
export(show.data.dir.contents)
export(update_elements.DT)
export(write.excel)
import(data.table)
#+eval=FALSE
#...................................................................................#
#
# USE CASE 1 example
#
# Import csv FADN data
#
#...................................................................................#
# In order to use fadnUtils, we must load fadnUtils and data.table
library(fadnUtils)
library(data.table)
# .............. CREATE data.dir ...................................................#
# fadnUtils always works with a user-defined data.dir
# Let's assume that the user has not created one yet.
# The following line creates a data.dir folder somewhere on our computer.
# We must also have created the raw_str_map.file and pass it as an argument
# to the function. This file is copied to the data.dir folder. Thus, we can
# see the structure of the data contained in a data.dir folder by inspecting
# the raw_str_map.file residing in it.
create.data.dir(
folder.path = "H:/IFM-CAP/sample.fadnutils.dir",
raw_str_map.file = "H:/IFM-CAP/version2/data/raw_str_map.after2013_var_names.json"
)
#Once the data.dir is created, we must declare that we are working with it
set.data.dir(
"H:/IFM-CAP/sample.fadnutils.dir"
)
# .............. IMPORT DATA IN A SINGLE STEP ......................................#
# In order to import the FADN csv files, the simplest way is
# to use import.fadn.csv.
# We provide the full path of the csv file and explicitly state
# the country and the year this file is referring to.
# Using this function, the csv data are saved in r-data format and the
# structured r-data are created using the data.dir raw_str_map.file.
import.fadn.csv(
file.path = "H:/IFM-CAP/dg_agri_csv/ELL2007.csv", #full path of the cvs file
fadn.country = "ELL",
fadn.year = 2007
)
# We can import many files (countries, years) in a single data.dir
import.fadn.csv(
file.path = "H:/IFM-CAP/dg_agri_csv/ESP2007.csv", #full path of the cvs file
fadn.country = "ESP",
fadn.year = 2007
)
# At any time, we can check, for the current data.dir, which files
# (countries, years) are loaded.
show.data.dir.contents()
# .............. IMPORT DATA IN TWO STEPS ..........................................#
# However you can import the file in two steps, one for converting
# the csv to fadn.raw.rds (csv-data to raw r-data) and
# one for converting the fadn.raw.rds to fadn.str.rds (raw r-data
# to structured r-data).
#step 1, convert csv to fadn.raw.rds
convert.to.fadn.raw.rds(
file.path = "H:/IFM-CAP/dg_agri_csv/ELL2015.csv",
fadn.country = "ELL",
fadn.year = 2015
)
# If you check what exists in the data.dir, you will see that
# only the fadn.raw.rds of ELL-2015 exists
show.data.dir.contents()
#Step 2, convert fadn.raw.rds to fadn.str.rds
convert.to.fadn.str.rds(
fadn.country = "ELL",
fadn.year = 2015
)
#check what is loaded
show.data.dir.contents()
# .............. RECALCULATE STRUCTURED DATA ......................................#
# Let's say that we want to overwrite the raw_str_map.file of the data.dir with a new one
overwrite.raw_str_map.file(
data.dir = "H:/IFM-CAP/sample.fadnutils.dir",
new.raw_str_map.file = "H:/IFM-CAP/version2/data/raw_str_map.after2013_var_names.json"
)
1.0.2
--------
Last issue: 27
TODO:
1. Allow the user to define csv configuration (delimiter, decimal point) and pass it to convert.to.fadn.raw.rds
4. Write a use case where the function convert.to.fadn.str.rds is used to recalculate the raw->str conversion (in case someone changes the map manually)
6. Add the option of encrypting the rds files, see here https://stackoverflow.com/questions/52851725/how-to-protect-encrypt-r-objects-in-rdata-files-due-to-eu-gdpr
8. Provide the option to copy rds content from other data.dir directories
11. On 'manage_data_dir.R > overwrite.raw_str_map.file', re-run all convert.to.str.rds operations (currently only the replacement of the file is taking place)
12. Add the possibility for the user to add a column description of the fadn.raw data (providing a text file)
13. Add the following feature: An R-shiny application for browsing loaded fadn.raw. The user can start this with a simple command.
14. Throw a warning message if load.fadn.{raw,str} does not load anything. Say "No files found to belong to this country and years. Nothing loaded"
15. Create a filter.fadn.str. It will take a fadn.str and a filter(for data.table) expression and will keep only the records for info,costs, crops
16. On convert_data > convert.to.fadn.str.rds, use tryCatch() to report the error and not fail
17. In the raw_str_map.json file, provide the option to define factor levels for a variable
18. Provide the ability to delete country/years from the raw/str files
23. Give the possibility to load str.data passing some filtering for an ID field for str.fadn. In load.fadn.str.rds function
26. Save the SExxx variables to the dat.fadn list object (create an entry in raw_str_maps and add code in the convert.to.fadn.str.rds function)
27. Keep the raw.fadn.rds also in a long format (sparse matrix). Ability to select how to load (wide or long format). Need to know which variables are numeric and which are strings. Keep them in different DT. Long format will return a list with one DT with the numeric values and one with the string values.
CHANGES UNDER WAY:
21. Provide the ability to use an external raw_str_map file (use it and copy it to raw_str_maps).
22. Add the content of the raw_str_map used for convert.to.fadn.str.rds in the attribute of the rds data.
9. Provide full documentation of raw_str_map.json specification (already some in the doc of convert.to.fadn.str.rds function)
CHANGES COMPLETED: (In date-completed descending order / newer changes on the top)
28. Utility function: Update an fadn.raw.rds file with external data (rows of id-column-new value). Load the data and update them with the new values.
27. Give the possibility to load raw.data with row selection based on a criterion (examples: column_x == xxx; column_x>xxx, etc. ) In load.fadn.raw.rds function
24. Provide the ability when load.fadn.raw to pass a vector of columns to load (and discard the rest)
25. added a DEBUG mode for convert.to.fadn.str.rds (detailed information on what is calculated is shown)
20. Write a function that merges two raw_str_map.json files. It will be used if one wants to have a basic raw_str_map and wants to make marginal changes for a specific case (year or country)
19. Provide the ability to use more than one raw_str_map.json (create.data with a vector of raw_str_map.json files, show the raw_str_map.json files in contents, check data dir structure changes, convert with specifying which)
2. Save loaded data to stored.rds.data.RData (added store.rds.data function and restore.rds.stored.data function; also show the saved rds data in show.data.dir.contents)
3. Provide the option to provide a file with the description of the variables for the fadn.str.rds files (data.dir specific). Probably alter the raw_str_map.json specification
5. Create a folder spool, where the users can put relevant files
1. Make the map_definition an organic part of the fadnUtils.data.dir
2. Add the option of storing/not storing the original csv from DG AGRI in the data.dir folder
10. On 'load.fadn.str.rds', output the message "Loading from ..." with <cat> instead of <print>
====================================================
OLD
====================================================
1.0.1
--------
CHANGES:
1. Keep data in folder, not included in the package