diff --git a/DESCRIPTION b/DESCRIPTION index 75743c68218629675598bf1fda8673e2921b5f92..9d9934751219edac46da063724c718246da94bb0 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,35 +1,11 @@ Package: s2dv -Title: Set of Common Tools for Forecast Verification +Title: Set of Common Tools for Seasonal to Decadal Verification Version: 0.0.1 Authors@R: c( person("BSC-CNS", role = c("aut", "cph")), - person("Virginie", "Guemas", , "virginie.guemas@bsc.es", role = "aut"), - person("Nicolau", "Manubens", , "nicolau.manubens@bsc.es", role = "aut"), - person("An-Chi", "Ho", , "an.ho@bsc.es", role = c("ctb", "cre")), - person("Nuria", "Perez-Zanon", , "nuria.perez@bsc.es", role = "ctb"), - person("Javier", "Garcia-Serrano", , "javier.garcia@bsc.es", role = "aut"), - person("Neven", "Fuckar", , "neven.fuckar@bsc.es", role = "aut"), - person("Louis-Philippe", "Caron", , "louis-philippe.caron@bsc.es", role = "aut"), - person("Omar", "Bellprat", , "omar.bellprat@bsc.es", role = "aut"), - person("Luis", "Rodrigues", , "lrodrigues@ic3.cat", role = "aut"), - person("Veronica", "Torralba", , "veronica.torralba@bsc.es", role = "aut"), - person("Alasdair", "Hunter", , "alasdair.hunter@bsc.es", role = "aut"), - person("Chloe", "Prodhomme", , "chloe.prodhomme@bs.es", role = "aut"), - person("Martin", "Menegoz", , "martin.menegoz@bsc.es", role = "aut"), - person("Domingo", "Manubens", , "domingo.manubens@bsc.es", role = "ctb"), - person("Constantin", "Ardilouze", , "constantin.ardilouze@meteo.fr", role = "ctb"), - person("Lauriane", "Batte", , "lauriane.batte@meteo.fr", role = "ctb"), - person("Fabian", "Lienert", , "fabian.lienert@bsc.es", role = "ctb"), - person("Julia", "Giner", , "julia.giner@bsc.es", role = "ctb"), - person("Jean-Philippe", "Baudouin", , "jean.baudouin@bsc.es", role = "ctb"), - person("Nube", "Gonzalez", , "nube.gonzalez@bsc.es", role = "ctb"), - person("Ludovic", "Auger", , "ludovic.auger@meteo.fr", role = "ctb"), - person("Nicola", "Cortesi", , 
"nicola.cortesi@bsc.es", role = "ctb"), - person("Eleftheria", "Exarchou", , "eleftheria.exarchou@bsc.es", role = "ctb"), - person("Ruben", "Cruz", , "ruben.cruzgarcia@bsc.es", role = "ctb"), - person("Isabel", "Andreu-Burillo", , "isabel.andreu.burillo@ic3.cat", role = "ctb"), - person("Ramiro", "Saurral", , "ramiro.saurral@ic3.cat", role = "ctb")) -Description: s2dv is the advanced version of package 's2dverification'. It is + person("An-Chi", "Ho", , "an.ho@bsc.es", role = c("aut", "cre")), + person("Nuria", "Perez-Zanon", , "nuria.perez@bsc.es", role = "aut")) +Description: The advanced version of package 's2dverification'. It is intended for 'seasonal to decadal' (s2d) climate forecast verification, but it can also be used in other kinds of forecasts or general climate analysis. This package is specially designed for the comparison between the experimental @@ -47,18 +23,20 @@ Imports: bigmemory, GEOmap, geomapdata, + graphics, + grDevices, mapproj, - NbClust, - ncdf4, parallel, + ClimProjDiags, + stats, plyr, - SpecsVerification (>= 0.5.0), + ncdf4, multiApply (>= 2.0.0) Suggests: easyVerification, testthat License: LGPL-3 -URL: https://earth.bsc.es/gitlab/es/s2dv/wikis/home +URL: https://earth.bsc.es/gitlab/es/s2dv/ BugReports: https://earth.bsc.es/gitlab/es/s2dv/issues LazyData: true SystemRequirements: cdo diff --git a/NAMESPACE b/NAMESPACE index 1457c4b441e45a4037f987e684d7fa4f97108667..8be5ed837de6a0f155362a8141a4fc790ef93b09 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -1,26 +1,32 @@ # Generated by roxygen2: do not edit by hand export(AnimateMap) -export(CDORemap) export(Clim) export(ColorBar) +export(ConfigAddEntry) +export(ConfigApplyMatchingEntries) +export(ConfigEditDefinition) +export(ConfigEditEntry) +export(ConfigFileCreate) +export(ConfigFileOpen) +export(ConfigFileSave) +export(ConfigRemoveDefinition) +export(ConfigRemoveEntry) +export(ConfigShowDefinitions) +export(ConfigShowSimilarEntries) +export(ConfigShowTable) export(Corr) export(Eno) 
export(InsertDim) export(LeapYear) export(Load) export(MeanDims) -export(Plot2VarsVsLTime) -export(PlotACC) -export(PlotAno) -export(PlotBoxWhisker) export(PlotClim) export(PlotEquiMap) export(PlotLayout) export(PlotMatrix) export(PlotSection) export(PlotStereoMap) -export(PlotVsLTime) export(RMS) export(RMSSS) export(Regression) @@ -40,22 +46,19 @@ import(methods) import(multiApply) import(ncdf4) import(parallel) +importFrom(ClimProjDiags,Subset) +importFrom(abind,abind) importFrom(abind,adrop) -importFrom(grDevices,bmp) importFrom(grDevices,col2rgb) importFrom(grDevices,colorRampPalette) importFrom(grDevices,dev.cur) importFrom(grDevices,dev.new) importFrom(grDevices,dev.off) importFrom(grDevices,gray) -importFrom(grDevices,jpeg) -importFrom(grDevices,pdf) -importFrom(grDevices,png) importFrom(grDevices,postscript) importFrom(grDevices,rainbow) importFrom(grDevices,rgb) -importFrom(grDevices,svg) -importFrom(grDevices,tiff) +importFrom(plyr,take) importFrom(stats,acf) importFrom(stats,confint) importFrom(stats,cor) @@ -64,10 +67,8 @@ importFrom(stats,median) importFrom(stats,na.fail) importFrom(stats,na.omit) importFrom(stats,na.pass) -importFrom(stats,predict) importFrom(stats,pt) importFrom(stats,qnorm) importFrom(stats,rnorm) -importFrom(stats,setNames) importFrom(stats,ts) importFrom(stats,window) diff --git a/R/AnimateMap.R b/R/AnimateMap.R index 83667d2a2199029677094018d6a4ed248450d795..1cd1ffe079ce77b04d68bf8433c4573c9635799c 100644 --- a/R/AnimateMap.R +++ b/R/AnimateMap.R @@ -89,16 +89,10 @@ #' } #'} #' -#'@keywords dynamic -#'@author History:\cr -#' 1.0 - 2012-04 (V. Guemas, \email{virginie.guemas@@bsc.es}) - Original code\cr -#' 1.1 - 2014-04 (N. Manubens, \email{nicolau.manubens@@bsc.es}) - Formatting to CRAN\cr -#' 1.2 - 2015-05 (V. 
Guemas, \email{virginie.guemas@@bsc.es}) - Use of PlotEquiMap and PlotStereoMap -#' #'@examples #'# See ?Load for explanations on the first part of this example #' \dontrun{ -#'data_path <- system.file('sample_data', package = 's2dverification') +#'data_path <- system.file('sample_data', package = 's2dv') #'expA <- list(name = 'experiment', path = file.path(data_path, #' 'model/$EXP_NAME$/$STORE_FREQ$_mean/$VAR_NAME$_3hourly', #' '$VAR_NAME$_$START_DATE$.nc')) @@ -114,11 +108,11 @@ #' } #' \dontshow{ #'startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') -#'sampleData <- s2dverification:::.LoadSampleData('tos', c('experiment'), -#' c('observation'), startDates, -#' output = 'lonlat', -#' latmin = 27, latmax = 48, -#' lonmin = -12, lonmax = 40) +#'sampleData <- s2dv:::.LoadSampleData('tos', c('experiment'), +#' c('observation'), startDates, +#' output = 'lonlat', +#' latmin = 27, latmax = 48, +#' lonmin = -12, lonmax = 40) #' } #'clim <- Clim(sampleData$mod, sampleData$obs, memb = FALSE) #' \dontrun{ @@ -128,33 +122,8 @@ #' msk95lev = FALSE, filled.continents = TRUE, intlon = 10, intlat = 10, #' fileout = 'clim_dec.gif') #' } -#'ano_exp <- Ano(sampleData$mod, clim$clim_exp) -#'ano_obs <- Ano(sampleData$obs, clim$clim_obs) -#'leadtimes_dimension <- 4 -#'initial_month <- 11 -#'mean_start_month <- 1 -#'mean_stop_month <- 12 -#'season_means_mod <- Season(ano_exp, leadtimes_dimension, initial_month, -#' mean_start_month, mean_stop_month) -#'season_means_obs <- Season(ano_obs, leadtimes_dimension, initial_month, -#' mean_start_month, mean_stop_month) -#' \dontrun{ -#'AnimateMap(Mean1Dim(season_means_mod, 2)[1, 1, , , ], sampleData$lon, -#' sampleData$lat, toptitle = "Annual anomalies 1985 of decadal prediction", -#' sizetit = 1, units = "degree", monini = 1, freq = 1, msk95lev = FALSE, -#' brks = seq(-0.5, 0.5, 0.1), intlon = 10, intlat = 10, -#' filled.continents = TRUE, fileout = 'annual_means_dec.gif') -#' } -#'dim_to_mean <- 2 # Mean along 
members -#'rms <- RMS(Mean1Dim(season_means_mod, dim_to_mean), -#' Mean1Dim(season_means_obs, dim_to_mean)) -#' \donttest{ -#'AnimateMap(rms, sampleData$lon, sampleData$lat, toptitle = -#' "RMSE decadal prediction", sizetit = 1, units = "degree", -#' monini = 1, freq = 1, msk95lev = FALSE, brks = seq(0, 0.8, 0.08), -#' intlon = 10, intlat = 10, filled.continents = TRUE, -#' fileout = 'rmse_dec.gif') -#' } +#' # More examples in s2dverification but are deleted for now +#' #'@importFrom grDevices postscript dev.off #'@export AnimateMap <- function(var, lon, lat, toptitle = rep("", 11), sizetit = 1, diff --git a/R/CDORemap.R b/R/CDORemap.R deleted file mode 100644 index ea6ff1374333384d7b7298b4d5bb19f1e529b181..0000000000000000000000000000000000000000 --- a/R/CDORemap.R +++ /dev/null @@ -1,1033 +0,0 @@ -#'Interpolates arrays with longitude and latitude dimensions using CDO -#' -#'This function takes as inputs a multidimensional array (optional), a vector -#'or matrix of longitudes, a vector or matrix of latitudes, a destination grid -#'specification, and the name of a method to be used to interpolate (one of -#'those available in the 'remap' utility in CDO). The interpolated array is -#'returned (if provided) together with the new longitudes and latitudes.\cr\cr -#'\code{CDORemap()} permutes by default the dimensions of the input array (if -#'needed), splits it in chunks (CDO can work with data arrays of up to 4 -#'dimensions), generates a file with the data of each chunk, interpolates it -#'with CDO, reads it back into R and merges it into a result array. If no -#'input array is provided, the longitude and latitude vectors will be -#'transformed only. If the array is already on the desired destination grid, -#'no transformation is performed (this behvaiour works only for lonlat and -#'gaussian grids). \cr\cr -#'Any metadata attached to the input data array, longitudes or latitudes will -#'be preserved or accordingly modified. 
-#' -#'@param data_array Multidimensional numeric array to be interpolated. If -#' provided, it must have at least a longitude and a latitude dimensions, -#' identified by the array dimension names. The names for these dimensions -#' must be one of the recognized by s2dverification (can be checked with -#' \code{s2dverification:::.KnownLonNames()} and -#' \code{s2dverification:::.KnownLatNames()}). -#'@param lons Numeric vector or array of longitudes of the centers of the grid -#' cells. Its size must match the size of the longitude/latitude dimensions -#' of the input array. -#'@param lats Numeric vector or array of latitudes of the centers of the grid -#' cells. Its size must match the size of the longitude/latitude dimensions -#' of the input array. -#'@param grid Character string specifying either a name of a target grid -#' (recognized by CDO; e.g.: 'r256x128', 't106grid') or a path to another -#' NetCDF file which to read the target grid from (a single grid must be -#' defined in such file). -#'@param method Character string specifying an interpolation method -#' (recognized by CDO; e.g.: 'con', 'bil', 'bic', 'dis'). The following -#' long names are also supported: 'conservative', 'bilinear', 'bicubic' and -#' 'distance-weighted'. -#'@param avoid_writes The step of permutation is needed when the input array -#' has more than 3 dimensions and none of the longitude or latitude dimensions -#' in the right-most position (CDO would not accept it without permuting -#' previously). This step, executed by default when needed, can be avoided -#' for the price of writing more intermediate files (whis usually is -#' unconvenient) by setting the parameter \code{avoid_writes = TRUE}. -#'@param crop Whether to crop the data after interpolation with -#' 'cdo sellonlatbox' (TRUE) or to extend interpolated data to the whole -#' world as CDO does by default (FALSE). 
If \code{crop = TRUE} then the -#' longitude and latitude borders which to crop at are taken as the limits of -#' the cells at the borders ('lons' and 'lats' are perceived as cell centers), -#' i.e. the resulting array will contain data that covers the same area as -#' the input array. This is equivalent to specifying \code{crop = 'preserve'}, -#' i.e. preserving area. If \code{crop = 'tight'} then the borders which to -#' crop at are taken as the minimum and maximum cell centers in 'lons' and -#' 'lats', i.e. the area covered by the resulting array may be smaller if -#' interpolating from a coarse grid to a fine grid. The parameter 'crop' also -#' accepts a numeric vector of custom borders which to crop at: -#' c(western border, eastern border, southern border, northern border). -#'@param force_remap Whether to force remapping, even if the input data array -#' is already on the target grid. -#'@param write_dir Path to the directory where to create the intermediate -#' files for CDO to work. By default, the R session temporary directory is -#' used (\code{tempdir()}). -#' -#'@return A list with the following components: -#' \item{'data_array'}{The interpolated data array (if an input array -#' is provided at all, NULL otherwise).} -#' \item{'lons'}{The longitudes of the data on the destination grid.} -#' \item{'lats'}{The latitudes of the data on the destination grid.} -#'@keywords datagen -#'@author History:\cr -#' 0.0 - 2017-01 (N. Manubens, \email{nicolau.manubens@@bsc.es}) - Original code. 
-#'@examples -#' \dontrun{ -#'# Interpolating only vectors of longitudes and latitudes -#'lon <- seq(0, 360 - 360/50, length.out = 50) -#'lat <- seq(-90, 90, length.out = 25) -#'tas2 <- CDORemap(NULL, lon, lat, 't170grid', 'bil', TRUE) -#' -#'# Minimal array interpolation -#'tas <- array(1:50, dim = c(25, 50)) -#'names(dim(tas)) <- c('lat', 'lon') -#'lon <- seq(0, 360 - 360/50, length.out = 50) -#'lat <- seq(-90, 90, length.out = 25) -#'tas2 <- CDORemap(tas, lon, lat, 't170grid', 'bil', TRUE) -#' -#'# Metadata can be attached to the inputs. It will be preserved and -#'# accordignly modified. -#'tas <- array(1:50, dim = c(25, 50)) -#'names(dim(tas)) <- c('lat', 'lon') -#'lon <- seq(0, 360 - 360/50, length.out = 50) -#'metadata <- list(lon = list(units = 'degrees_east')) -#'attr(lon, 'variables') <- metadata -#'lat <- seq(-90, 90, length.out = 25) -#'metadata <- list(lat = list(units = 'degrees_north')) -#'attr(lat, 'variables') <- metadata -#'metadata <- list(tas = list(dim = list(lat = list(len = 25, -#' vals = lat), -#' lon = list(len = 50, -#' vals = lon) -#' ))) -#'attr(tas, 'variables') <- metadata -#'tas2 <- CDORemap(tas, lon, lat, 't170grid', 'bil', TRUE) -#' -#'# Arrays of any number of dimensions in any order can be provided. 
-#'num_lats <- 25 -#'num_lons <- 50 -#'tas <- array(1:(10*num_lats*10*num_lons*10), -#' dim = c(10, num_lats, 10, num_lons, 10)) -#'names(dim(tas)) <- c('a', 'lat', 'b', 'lon', 'c') -#'lon <- seq(0, 360 - 360/num_lons, length.out = num_lons) -#'metadata <- list(lon = list(units = 'degrees_east')) -#'attr(lon, 'variables') <- metadata -#'lat <- seq(-90, 90, length.out = num_lats) -#'metadata <- list(lat = list(units = 'degrees_north')) -#'attr(lat, 'variables') <- metadata -#'metadata <- list(tas = list(dim = list(a = list(), -#' lat = list(len = num_lats, -#' vals = lat), -#' b = list(), -#' lon = list(len = num_lons, -#' vals = lon), -#' c = list() -#' ))) -#'attr(tas, 'variables') <- metadata -#'tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', TRUE) -#'# The step of permutation can be avoided but more intermediate file writes -#'# will be performed. -#'tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', FALSE) -#' -#'# If the provided array has the longitude or latitude dimension in the -#'# right-most position, the same number of file writes will be performed, -#'# even if avoid_wrties = FALSE. 
-#'num_lats <- 25 -#'num_lons <- 50 -#'tas <- array(1:(10*num_lats*10*num_lons*10), -#' dim = c(10, num_lats, 10, num_lons)) -#'names(dim(tas)) <- c('a', 'lat', 'b', 'lon') -#'lon <- seq(0, 360 - 360/num_lons, length.out = num_lons) -#'metadata <- list(lon = list(units = 'degrees_east')) -#'attr(lon, 'variables') <- metadata -#'lat <- seq(-90, 90, length.out = num_lats) -#'metadata <- list(lat = list(units = 'degrees_north')) -#'attr(lat, 'variables') <- metadata -#'metadata <- list(tas = list(dim = list(a = list(), -#' lat = list(len = num_lats, -#' vals = lat), -#' b = list(), -#' lon = list(len = num_lons, -#' vals = lon) -#' ))) -#'attr(tas, 'variables') <- metadata -#'tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', TRUE) -#'tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', FALSE) -#' -#'# An example of an interpolation from and onto a rectangular regular grid -#'num_lats <- 25 -#'num_lons <- 50 -#'tas <- array(1:(1*num_lats*num_lons), dim = c(num_lats, num_lons)) -#'names(dim(tas)) <- c('y', 'x') -#'lon <- array(seq(0, 360 - 360/num_lons, length.out = num_lons), -#' dim = c(num_lons, num_lats)) -#'metadata <- list(lon = list(units = 'degrees_east')) -#'names(dim(lon)) <- c('x', 'y') -#'attr(lon, 'variables') <- metadata -#'lat <- t(array(seq(-90, 90, length.out = num_lats), -#' dim = c(num_lats, num_lons))) -#'metadata <- list(lat = list(units = 'degrees_north')) -#'names(dim(lat)) <- c('x', 'y') -#'attr(lat, 'variables') <- metadata -#'tas2 <- CDORemap(tas, lon, lat, 'r100x50', 'bil') -#' -#'# An example of an interpolation from an irregular grid onto a gaussian grid -#'num_lats <- 25 -#'num_lons <- 50 -#'tas <- array(1:(10*num_lats*10*num_lons*10), -#' dim = c(10, num_lats, 10, num_lons)) -#'names(dim(tas)) <- c('a', 'j', 'b', 'i') -#'lon <- array(seq(0, 360 - 360/num_lons, length.out = num_lons), -#' dim = c(num_lons, num_lats)) -#'metadata <- list(lon = list(units = 'degrees_east')) -#'names(dim(lon)) <- c('i', 'j') -#'attr(lon, 'variables') <- metadata 
-#'lat <- t(array(seq(-90, 90, length.out = num_lats), -#' dim = c(num_lats, num_lons))) -#'metadata <- list(lat = list(units = 'degrees_north')) -#'names(dim(lat)) <- c('i', 'j') -#'attr(lat, 'variables') <- metadata -#'tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil') -#' -#'# Again, the dimensions can be in any order -#'num_lats <- 25 -#'num_lons <- 50 -#'tas <- array(1:(10*num_lats*10*num_lons), -#' dim = c(10, num_lats, 10, num_lons)) -#'names(dim(tas)) <- c('a', 'j', 'b', 'i') -#'lon <- array(seq(0, 360 - 360/num_lons, length.out = num_lons), -#' dim = c(num_lons, num_lats)) -#'names(dim(lon)) <- c('i', 'j') -#'lat <- t(array(seq(-90, 90, length.out = num_lats), -#' dim = c(num_lats, num_lons))) -#'names(dim(lat)) <- c('i', 'j') -#'tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil') -#'tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', FALSE) -#'# It is ossible to specify an external NetCDF file as target grid reference -#'tas2 <- CDORemap(tas, lon, lat, 'external_file.nc', 'bil') -#'} -#'@import ncdf4 -#'@importFrom stats lm predict setNames -#'@export -CDORemap <- function(data_array = NULL, lons, lats, grid, method, - avoid_writes = TRUE, crop = TRUE, - force_remap = FALSE, write_dir = tempdir()) { #, mask = NULL) { - .isRegularVector <- function(x, tol = 0.1) { - if (length(x) < 2) { - #stop("The provided vector must be of length 2 or greater.") - TRUE - } else { - spaces <- x[2:length(x)] - x[1:(length(x) - 1)] - (sum(abs(spaces - mean(spaces)) > mean(spaces) / (1 / tol)) < 2) - } - } - # Check parameters data_array, lons and lats. 
- known_lon_names <- .KnownLonNames() - known_lat_names <- .KnownLatNames() - if (!is.numeric(lons) || !is.numeric(lats)) { - stop("Expected numeric 'lons' and 'lats'.") - } - if (any(is.na(lons > 0))) { - stop("Found invalid values in 'lons'.") - } - if (any(is.na(lats > 0))) { - stop("Found invalid values in 'lats'.") - } - if (is.null(dim(lons))) { - dim(lons) <- length(lons) - } - if (is.null(dim(lats))) { - dim(lats) <- length(lats) - } - if (length(dim(lons)) > 2 || length(dim(lats)) > 2) { - stop("'lons' and 'lats' can only have up to 2 dimensions.") - } - if (length(dim(lons)) != length(dim(lats))) { - stop("'lons' and 'lats' must have the same number of dimensions.") - } - if (length(dim(lons)) == 2 && !all(dim(lons) == dim(lats))) { - stop("'lons' and 'lats' must have the same dimension sizes.") - } - return_array <- TRUE - if (is.null(data_array)) { - return_array <- FALSE - if (length(dim(lons)) == 1) { - array_dims <- c(length(lats), length(lons)) - new_lon_dim_name <- 'lon' - new_lat_dim_name <- 'lat' - } else { - array_dims <- dim(lons) - new_lon_dim_name <- 'i' - new_lat_dim_name <- 'j' - } - if (!is.null(names(dim(lons)))) { - if (any(known_lon_names %in% names(dim(lons)))) { - new_lon_dim_name <- known_lon_names[which(known_lon_names %in% names(dim(lons)))[1]] - } - } - if (!is.null(names(dim(lats)))) { - if (any(known_lat_names %in% names(dim(lats)))) { - new_lat_dim_name <- known_lat_names[which(known_lat_names %in% names(dim(lats)))[1]] - } - } - names(array_dims) <- c(new_lat_dim_name, new_lon_dim_name) - data_array <- array(as.numeric(NA), array_dims) - } - if (!(is.logical(data_array) || is.numeric(data_array)) || !is.array(data_array)) { - stop("Parameter 'data_array' must be a numeric array.") - } - if (is.null(names(dim(data_array)))) { - stop("Parameter 'data_array' must have named dimensions.") - } - lon_dim <- which(known_lon_names %in% names(dim(data_array))) - if (length(lon_dim) < 1) { - stop("Could not find a known longitude 
dimension name in the provided 'data_array'.") - } - if (length(lon_dim) > 1) { - stop("Found more than one known longitude dimension names in the provided 'data_array'.") - } - lon_dim <- known_lon_names[lon_dim] - lat_dim <- which(known_lat_names %in% names(dim(data_array))) - if (length(lat_dim) < 1) { - stop("Could not find a known latitude dimension name in the provided 'data_array'.") - } - if (length(lat_dim) > 1) { - stop("Found more than one known latitude dimension name in the provided 'data_array'.") - } - lat_dim <- known_lat_names[lat_dim] - if (is.null(names(dim(lons)))) { - if (length(dim(lons)) == 1) { - names(dim(lons)) <- lon_dim - } else { - stop("Parameter 'lons' must be provided with dimension names.") - } - } else { - if (!(lon_dim %in% names(dim(lons)))) { - stop("Parameter 'lon' must have the same longitude dimension name as the 'data_array'.") - } - if (length(dim(lons)) > 1 && !(lat_dim %in% names(dim(lons)))) { - stop("Parameter 'lon' must have the same latitude dimension name as the 'data_array'.") - } - } - if (is.null(names(dim(lats)))) { - if (length(dim(lats)) == 1) { - names(dim(lats)) <- lat_dim - } else { - stop("Parameter 'lats' must be provided with dimension names.") - } - } else { - if (!(lat_dim %in% names(dim(lats)))) { - stop("Parameter 'lat' must have the same latitude dimension name as the 'data_array'.") - } - if (length(dim(lats)) > 1 && !(lon_dim %in% names(dim(lats)))) { - stop("Parameter 'lat' must have the same longitude dimension name as the 'data_array'.") - } - } - lons_attr_bk <- attributes(lons) - if (is.null(lons_attr_bk)) { - lons_attr_bk <- list() - } - lats_attr_bk <- attributes(lats) - if (is.null(lats_attr_bk)) { - lats_attr_bk <- list() - } - if (length(attr(lons, 'variables')) == 0) { - new_metadata <- list(list()) - if (length(dim(lons)) == 1) { - names(new_metadata) <- lon_dim - } else { - names(new_metadata) <- paste0(lon_dim, '_var') - } - attr(lons, 'variables') <- new_metadata - } - if (!('units' 
%in% names(attr(lons, 'variables')[[1]]))) { - new_metadata <- attr(lons, 'variables') - #names(new_metadata)[1] <- lon_dim - new_metadata[[1]][['units']] <- 'degrees_east' - attr(lons, 'variables') <- new_metadata - } - if (length(attr(lats, 'variables')) == 0) { - new_metadata <- list(list()) - if (length(dim(lats)) == 1) { - names(new_metadata) <- lat_dim - } else { - names(new_metadata) <- paste0(lat_dim, '_var') - } - attr(lats, 'variables') <- new_metadata - } - if (!('units' %in% names(attr(lats, 'variables')[[1]]))) { - new_metadata <- attr(lats, 'variables') - #names(new_metadata)[1] <- lat_dim - new_metadata[[1]][['units']] <- 'degrees_north' - attr(lats, 'variables') <- new_metadata - } - # Check grid. - if (!is.character(grid)) { - stop("Parameter 'grid' must be a character string specifying a ", - "target CDO grid, 'rXxY' or 'tRESgrid', or a path to another ", - "NetCDF file.") - } - if (grepl('^r[0-9]{1,}x[0-9]{1,}$', grid)) { - grid_type <- 'regular' - grid_lons <- as.numeric(strsplit(strsplit(grid, 'x')[[1]][1], 'r')[[1]][2]) - grid_lats <- as.numeric(strsplit(grid, 'x')[[1]][2]) - } else if (grepl('^t[0-9]{1,}grid$', grid)) { - grid_type <- 'gaussian' - grid_t <- as.numeric(strsplit(strsplit(grid, 'grid')[[1]][1], 't')[[1]][2]) - grid_size <- .t2nlatlon(grid_t) - grid_lons <- grid_size[2] - grid_lats <- grid_size[1] - } else { - grid_type <- 'custom' - } - # Check method. - if (method %in% c('bil', 'bilinear')) { - method <- 'bil' - } else if (method %in% c('bic', 'bicubic')) { - method <- 'bic' - } else if (method %in% c('con', 'conservative')) { - method <- 'con' - } else if (method %in% c('dis', 'distance-weighted')) { - method <- 'dis' - } else { - stop("Unsupported CDO remap method. 
'bilinear', 'bicubic', 'conservative' or 'distance-weighted' supported only.") - } - # Check avoid_writes - if (!is.logical(avoid_writes)) { - stop("Parameter 'avoid_writes' must be a logical value.") - } - # Check crop - crop_tight <- FALSE - if (is.character(crop)) { - if (crop == 'tight') { - crop_tight <- TRUE - } else if (crop != 'preserve') { - stop("Parameter 'crop' can only take the values 'tight' or 'preserve' if specified as a character string.") - } - crop <- TRUE - } - if (is.logical(crop)) { - if (crop) { - warning("Parameter 'crop' = 'TRUE'. The output grid range will follow the input lons and lats.") - if (length(lons) == 1 || length(lats) == 1) { - stop("CDORemap cannot remap if crop = TRUE and values for only one ", - "longitude or one latitude are provided. Either a) provide ", - "values for more than one longitude/latitude, b) explicitly ", - "specify the crop limits in the parameter crop, or c) set ", - "crop = FALSE.") - } - if (crop_tight) { - lon_extremes <- c(min(lons), max(lons)) - lat_extremes <- c(min(lats), max(lats)) - } else { - # Here we are trying to look for the extreme lons and lats in the data. - # Not the centers of the extreme cells, but the borders of the extreme cells. 
-###--- - if (length(dim(lons)) == 1) { - tmp_lon <- lons - } else { - min_pos <- which(lons == min(lons), arr.ind = TRUE)[1, ] - tmp_lon <- Subset(lons, lat_dim, min_pos[which(names(dim(lons)) == lat_dim)], drop = 'selected') - } - i <- 1:length(tmp_lon) - degree <- min(3, length(i) - 1) - lon_model <- lm(tmp_lon ~ poly(i, degree)) - lon_extremes <- c(NA, NA) - left_is_min <- FALSE - right_is_max <- FALSE - if (which.min(tmp_lon) == 1) { - left_is_min <- TRUE - prev_lon <- predict(lon_model, data.frame(i = 0)) - first_lon_cell_width <- (tmp_lon[1] - prev_lon) - # The signif is needed because cdo sellonlatbox crashes with too many digits - lon_extremes[1] <- tmp_lon[1] - first_lon_cell_width / 2 - } else { - lon_extremes[1] <- min(tmp_lon) - } - if (which.max(tmp_lon) == length(tmp_lon)) { - right_is_max <- TRUE - next_lon <- predict(lon_model, data.frame(i = length(tmp_lon) + 1)) - last_lon_cell_width <- (next_lon - tmp_lon[length(tmp_lon)]) - lon_extremes[2] <- tmp_lon[length(tmp_lon)] + last_lon_cell_width / 2 - } else { - lon_extremes[2] <- max(tmp_lon) - } - # Adjust the crop window if possible in order to keep lons from 0 to 360 - # or from -180 to 180 when the extremes of the cropped window are contiguous. 
- if (right_is_max) { - if (lon_extremes[1] < -180) { - if (!((lon_extremes[2] < 180) && !((180 - lon_extremes[2]) <= last_lon_cell_width / 2))) { - lon_extremes[1] <- -180 - lon_extremes[2] <- 180 - } - } else if (lon_extremes[1] < 0) { - if (!((lon_extremes[2] < 360) && !((360 - lon_extremes[2]) <= last_lon_cell_width / 2))) { - lon_extremes[1] <- 0 - lon_extremes[2] <- 360 - } - } - } - if (left_is_min) { - if (lon_extremes[2] > 360) { - if (!((lon_extremes[1] > 0) && !(lon_extremes[1] <= first_lon_cell_width / 2))) { - lon_extremes[1] <- 0 - lon_extremes[2] <- 360 - } - } else if (lon_extremes[2] > 180) { - if (!((lon_extremes[1] > -180) && !((180 + lon_extremes[1]) <= first_lon_cell_width / 2))) { - lon_extremes[1] <- -180 - lon_extremes[2] <- 180 - } - } - } -## lon_extremes <- signif(lon_extremes, 5) -## lon_extremes <- lon_extremes + 0.00001 -###--- - if (length(dim(lats)) == 1) { - tmp_lat <- lats - } else { - min_pos <- which(lats == min(lats), arr.ind = TRUE)[1, ] - tmp_lat <- Subset(lats, lon_dim, min_pos[which(names(dim(lats)) == lon_dim)], drop = 'selected') - } - i <- 1:length(tmp_lat) - degree <- min(3, length(i) - 1) - lat_model <- lm(tmp_lat ~ poly(i, degree)) - lat_extremes <- c(NA, NA) - if (which.min(tmp_lat) == 1) { - prev_lat <- predict(lat_model, data.frame(i = 0)) - lat_extremes[1] <- tmp_lat[1] - (tmp_lat[1] - prev_lat) / 2 - } else { - lat_extremes[1] <- min(tmp_lat) - } - if (which.max(tmp_lat) == length(tmp_lat)) { - next_lat <- predict(lat_model, data.frame(i = length(tmp_lat) + 1)) - lat_extremes[2] <- tmp_lat[length(tmp_lat)] + (next_lat - tmp_lat[length(tmp_lat)]) / 2 - } else { - lat_extremes[2] <- max(tmp_lat) - } -## lat_extremes <- signif(lat_extremes, 5) - # Adjust crop window - if (lat_extremes[1] < -90) { - lat_extremes[1] <- -90 - } else if (lat_extremes[1] > 90) { - lat_extremes[1] <- 90 - } - if (lat_extremes[2] < -90) { - lat_extremes[2] <- -90 - } else if (lat_extremes[2] > 90) { - lat_extremes[2] <- 90 - } -###--- - } - 
} else if (crop == FALSE) { - warning("Parameter 'crop' = 'FALSE'. The output grid range will follow parameter 'grid'.") - } - } else if (is.numeric(crop)) { - if (length(crop) != 4) { - stop("Paramrter 'crop' must be a logical value or a numeric vector of length 4: c(western border, eastern border, southern border, northern border.") - } else { - lon_extremes <- crop[1:2] - lat_extremes <- crop[3:4] - crop <- TRUE - } - } else { - stop("Parameter 'crop' must be a logical value or a numeric vector.") - } - # Check force_remap - if (!is.logical(force_remap)) { - stop("Parameter 'force_remap' must be a logical value.") - } - # Check write_dir - if (!is.character(write_dir)) { - stop("Parameter 'write_dir' must be a character string.") - } - if (!dir.exists(write_dir)) { - stop("Parameter 'write_dir' must point to an existing directory.") - } -# if (!is.null(mask)) { -# if (!is.numeric(mask) || !is.array(mask)) { -# stop("Parameter 'mask' must be a numeric array.") -# } -# if (length(dim(mask)) != 2) { -# stop("Parameter 'mask' must have two dimensions.") -# } -# if (is.null(names(dim(mask)))) { -# if (dim(data_array)[lat_dim] == dim(data_array)[lon_dim]) { -# stop("Cannot disambiguate which is the longitude dimension of ", -# "the provided 'mask'. 
Provide it with dimension names.") -# } -# names(dim(mask)) <- c('', '') -# found_lon_dim <- which(dim(mask) == dim(data_array)[lon_dim]) -# if (length(found_lon_dim) < 0) { -# stop("The dimension sizes of the provided 'mask' do not match ", -# "the spatial dimension sizes of the array to interpolate.") -# } else { -# names(dim(mask)[found_lon_dim]) <- lon_dim -# } -# found_lat_dim <- which(dim(mask) == dim(data_array)[lat_dim]) -# if (length(found_lat_dim) < 0) { -# stop("The dimension sizes of the provided 'mask' do not match ", -# "the spatial dimension sizes of the array to interpolate.") -# } else { -# names(dim(mask)[found_lat_dim]) <- lat_dim -# } -# } -# lon_position <- which(names(dim(data_array)) == lon_dim) -# lat_position <- which(names(dim(data_array)) == lat_dim) -# if (lon_position > lat_position) { -# if (names(dim(mask))[1] == lon_dim) { -# mask <- t(mask) -# } -# } else { -# if (names(dim(mask))[1] == lat_dim) { -# mask <- t(mask) -# } -# } -# ## TODO: Apply mask!!! Preserve attributes -# } - # Check if interpolation can be skipped. - interpolation_needed <- TRUE - if (!force_remap) { - if (!(grid_type == 'custom')) { - if (length(lons) == grid_lons && length(lats) == grid_lats) { - if (grid_type == 'regular') { - if (.isRegularVector(lons) && .isRegularVector(lats)) { - interpolation_needed <- FALSE - } - } else if (grid_type == 'gaussian') { - # TODO: improve this check. Gaussian quadrature should be used. 
- if (.isRegularVector(lons) && !.isRegularVector(lats)) { - interpolation_needed <- FALSE - } - } - } - } - } - found_lons <- lons - found_lats <- lats - if (interpolation_needed) { - if (nchar(Sys.which('cdo')[1]) < 1) { - stop("CDO must be installed in order to use the .CDORemap.") - } - cdo_version <- as.numeric_version( - strsplit(suppressWarnings(system2("cdo", args = '-V', stderr = TRUE))[[1]], ' ')[[1]][5] - ) - warning("CDORemap: Using CDO version ", cdo_version, ".") - if ((cdo_version >= as.numeric_version('1.7.0')) && (method == 'con')) { - method <- 'ycon' - } - # CDO takes arrays of 3 dimensions or 4 if one of them is unlimited. - # The unlimited dimension can only be the left-most (right-most in R). - # There are no restrictions for the dimension names or variable names. - # The longitude and latitude are detected by their units. - # There are no restrictions for the order of the limited dimensions. - # The longitude/latitude variables and dimensions must have the same name. - # The procedure consists in: - # - take out the array metadata - # - be aware of var dimension (replacing the dimension names would do). - # - take arrays of 4 dimensions always if possible - # - make the last dimension unlimited when saving to netcdf - # - if the last dimension is lon or lat, either reorder the array and - # then reorder back or iterate over the dimensions at the right - # side of lon AND lat. - # If the input array has more than 4 dimensions, it is needed to - # run CDO on each sub-array of 4 dimensions because it can handle - # only up to 4 dimensions. The shortest dimensions are chosen to - # iterate over. 
- is_irregular <- FALSE - if (length(dim(lats)) > 1 && length(dim(lons)) > 1) { - is_irregular <- TRUE - } - attribute_backup <- attributes(data_array) - other_dims <- which(!(names(dim(data_array)) %in% c(lon_dim, lat_dim))) - permutation <- NULL - unlimited_dim <- NULL - dims_to_iterate <- NULL - total_slices <- 1 - other_dims_per_chunk <- ifelse(is_irregular, 1, 2) # 4 (the maximum accepted by CDO) - 2 (lon, lat) = 2. - if (length(other_dims) > 1 || (length(other_dims) > 0 && (is_irregular))) { - if (!(length(dim(data_array)) %in% other_dims)) { - if (avoid_writes || is_irregular) { - dims_mod <- dim(data_array) - dims_mod[which(names(dim(data_array)) %in% - c(lon_dim, lat_dim))] <- 0 - dim_to_move <- which.max(dims_mod) - permutation <- (1:length(dim(data_array)))[-dim_to_move] - permutation <- c(permutation, dim_to_move) - permutation_back <- sort(permutation, index.return = TRUE)$ix - dim_backup <- dim(data_array) - data_array <- aperm(data_array, permutation) - dim(data_array) <- dim_backup[permutation] - other_dims <- which(!(names(dim(data_array)) %in% c(lon_dim, lat_dim))) - } else { - # We allow only lon, lat and 1 more dimension per chunk, so - # CDO has no restrictions in the order. 
- other_dims_per_chunk <- 1 - } - } - other_dims_ordered_by_size <- other_dims[sort(dim(data_array)[other_dims], index.return = TRUE)$ix] - dims_to_iterate <- sort(head(other_dims_ordered_by_size, length(other_dims) - other_dims_per_chunk)) - if (length(dims_to_iterate) == 0) { - dims_to_iterate <- NULL - } else { - slices_to_iterate <- array(1:prod(dim(data_array)[dims_to_iterate]), - dim(data_array)[dims_to_iterate]) - total_slices <- prod(dim(slices_to_iterate)) - } - if ((other_dims_per_chunk > 1) || (other_dims_per_chunk > 0 && is_irregular)) { - unlimited_dim <- tail(sort(tail(other_dims_ordered_by_size, other_dims_per_chunk)), 1) - #unlimited_dim <- tail(other_dims) - } - } - - result_array <- NULL - lon_pos <- which(names(dim(data_array)) == lon_dim) - lat_pos <- which(names(dim(data_array)) == lat_dim) - dim_backup <- dim(data_array) - attributes(data_array) <- NULL - dim(data_array) <- dim_backup - names(dim(data_array)) <- paste0('dim', 1:length(dim(data_array))) - names(dim(data_array))[c(lon_pos, lat_pos)] <- c(lon_dim, lat_dim) - if (!is.null(unlimited_dim)) { - # This will make ArrayToNetCDF create this dim as unlimited. 
- names(dim(data_array))[unlimited_dim] <- 'time' - } - if (length(dim(lons)) == 1) { - names(dim(lons)) <- lon_dim - } - if (length(dim(lats)) == 1) { - names(dim(lats)) <- lat_dim - } - if (length(dim(lons)) > 1) { - lon_var_name <- paste0(lon_dim, '_var') - } else { - lon_var_name <- lon_dim - } - if (length(dim(lats)) > 1) { - lat_var_name <- paste0(lat_dim, '_var') - } else { - lat_var_name <- lat_dim - } - if (is_irregular) { - metadata <- list(list(coordinates = paste(lon_var_name, lat_var_name))) - names(metadata) <- 'var' - attr(data_array, 'variables') <- metadata - } - names(attr(lons, 'variables')) <- lon_var_name - names(attr(lats, 'variables')) <- lat_var_name - if (!is.null(attr(lons, 'variables')[[1]][['dim']])) { - attr(lons, 'variables')[[1]][['dim']] <- NULL - } - if (!is.null(attr(lats, 'variables')[[1]][['dim']])) { - attr(lats, 'variables')[[1]][['dim']] <- NULL - } - lons_lats_taken <- FALSE - for (i in 1:total_slices) { - tmp_file <- tempfile('R_CDORemap_', write_dir, fileext = '.nc') - tmp_file2 <- tempfile('R_CDORemap_', write_dir, fileext = '.nc') - if (!is.null(dims_to_iterate)) { - slice_indices <- which(slices_to_iterate == i, arr.ind = TRUE) - subset <- Subset(data_array, dims_to_iterate, as.list(slice_indices), drop = 'selected') -# dims_before_crop <- dim(subset) - # Make sure subset goes along with metadata - ArrayToNetCDF(setNames(list(subset, lons, lats), c('var', lon_var_name, lat_var_name)), tmp_file) - } else { -# dims_before_crop <- dim(data_array) - ArrayToNetCDF(setNames(list(data_array, lons, lats), c('var', lon_var_name, lat_var_name)), tmp_file) - } - sellonlatbox <- '' - if (crop) { - sellonlatbox <- paste0('sellonlatbox,', format(lon_extremes[1], scientific = FALSE), - ',', format(lon_extremes[2], scientific = FALSE), - ',', format(lat_extremes[1], scientific = FALSE), - ',', format(lat_extremes[2], scientific = FALSE), ' -') - } - err <- try({ - system(paste0("cdo -s ", sellonlatbox, "remap", method, ",", grid, " ", 
tmp_file, " ", tmp_file2)) - }) - file.remove(tmp_file) - if (('try-error' %in% class(err)) || err > 0) { - stop("CDO remap failed.") - } - ncdf_remapped <- nc_open(tmp_file2) - if (!lons_lats_taken) { - found_dim_names <- sapply(ncdf_remapped$var$var$dim, '[[', 'name') - found_lon_dim <- found_dim_names[which(found_dim_names %in% .KnownLonNames())[1]] - found_lat_dim <- found_dim_names[which(found_dim_names %in% .KnownLatNames())[1]] - found_lon_dim_size <- length(ncdf_remapped$dim[[found_lon_dim]]$vals) - found_lat_dim_size <- length(ncdf_remapped$dim[[found_lat_dim]]$vals) - found_var_names <- names(ncdf_remapped$var) - found_lon_var_name <- which(found_var_names %in% .KnownLonNames()) - found_lat_var_name <- which(found_var_names %in% .KnownLatNames()) - if (length(found_lon_var_name) > 0) { - found_lon_var_name <- found_var_names[found_lon_var_name[1]] - } else { - found_lon_var_name <- NULL - } - if (length(found_lat_var_name) > 0) { - found_lat_var_name <- found_var_names[found_lat_var_name[1]] - } else { - found_lat_var_name <- NULL - } - if (length(found_lon_var_name) > 0) { - found_lons <- ncvar_get(ncdf_remapped, found_lon_var_name, - collapse_degen = FALSE) - } else { - found_lons <- ncdf_remapped$dim[[found_lon_dim]]$vals - dim(found_lons) <- found_lon_dim_size - } - if (length(found_lat_var_name) > 0) { - found_lats <- ncvar_get(ncdf_remapped, found_lat_var_name, - collapse_degen = FALSE) - } else { - found_lats <- ncdf_remapped$dim[[found_lat_dim]]$vals - dim(found_lats) <- found_lat_dim_size - } - if (length(dim(lons)) == length(dim(found_lons))) { - new_lon_name <- lon_dim - } else { - new_lon_name <- found_lon_dim - } - if (length(dim(lats)) == length(dim(found_lats))) { - new_lat_name <- lat_dim - } else { - new_lat_name <- found_lat_dim - } - if (length(dim(found_lons)) > 1) { - if (which(sapply(ncdf_remapped$var$lon$dim, '[[', 'name') == found_lon_dim) < - which(sapply(ncdf_remapped$var$lon$dim, '[[', 'name') == found_lat_dim)) { - 
names(dim(found_lons)) <- c(new_lon_name, new_lat_name) - } else { - names(dim(found_lons)) <- c(new_lat_name, new_lon_name) - } - } else { - names(dim(found_lons)) <- new_lon_name - } - if (length(dim(found_lats)) > 1) { - if (which(sapply(ncdf_remapped$var$lat$dim, '[[', 'name') == found_lon_dim) < - which(sapply(ncdf_remapped$var$lat$dim, '[[', 'name') == found_lat_dim)) { - names(dim(found_lats)) <- c(new_lon_name, new_lat_name) - } else { - names(dim(found_lats)) <- c(new_lat_name, new_lon_name) - } - } else { - names(dim(found_lats)) <- new_lat_name - } - lons_lats_taken <- TRUE - } - if (!is.null(dims_to_iterate)) { - if (is.null(result_array)) { - if (return_array) { - new_dims <- dim(data_array) - new_dims[c(lon_dim, lat_dim)] <- c(found_lon_dim_size, found_lat_dim_size) - result_array <- array(dim = new_dims) - store_indices <- as.list(rep(TRUE, length(dim(result_array)))) - } - } - if (return_array) { - store_indices[dims_to_iterate] <- as.list(slice_indices) - result_array <- do.call('[<-', c(list(x = result_array), store_indices, - list(value = ncvar_get(ncdf_remapped, 'var', collapse_degen = FALSE)))) - } - } else { - new_dims <- dim(data_array) - new_dims[c(lon_dim, lat_dim)] <- c(found_lon_dim_size, found_lat_dim_size) - result_array <- ncvar_get(ncdf_remapped, 'var', collapse_degen = FALSE) - dim(result_array) <- new_dims - } - nc_close(ncdf_remapped) - file.remove(tmp_file2) - } - if (!is.null(permutation)) { - dim_backup <- dim(result_array) - result_array <- aperm(result_array, permutation_back) - dim(result_array) <- dim_backup[permutation_back] - } - # Now restore the metadata - result_is_irregular <- FALSE - if (length(dim(found_lats)) > 1 && length(dim(found_lons)) > 1) { - result_is_irregular <- TRUE - } - attribute_backup[['dim']][which(names(dim(result_array)) == lon_dim)] <- dim(result_array)[lon_dim] - attribute_backup[['dim']][which(names(dim(result_array)) == lat_dim)] <- dim(result_array)[lat_dim] - 
names(attribute_backup[['dim']])[which(names(dim(result_array)) == lon_dim)] <- new_lon_name - names(attribute_backup[['dim']])[which(names(dim(result_array)) == lat_dim)] <- new_lat_name - if (!is.null(attribute_backup[['variables']]) && (length(attribute_backup[['variables']]) > 0)) { - for (var in 1:length(attribute_backup[['variables']])) { - if (length(attribute_backup[['variables']][[var]][['dim']]) > 0) { - for (dim in 1:length(attribute_backup[['variables']][[var]][['dim']])) { - dim_name <- NULL - if ('name' %in% names(attribute_backup[['variables']][[var]][['dim']][[dim]])) { - dim_name <- attribute_backup[['variables']][[var]][['dim']][[dim]][['name']] - if (dim_name %in% c(lon_dim, lat_dim)) { - if (dim_name == lon_dim) { - attribute_backup[['variables']][[var]][['dim']][[dim]][['name']] <- new_lon_name - } else { - attribute_backup[['variables']][[var]][['dim']][[dim]][['name']] <- new_lat_name - } - } - } else if (!is.null(names(attribute_backup[['variables']][[var]][['dim']]))) { - dim_name <- names(attribute_backup[['variables']][[var]][['dim']])[dim] - if (dim_name %in% c(lon_dim, lat_dim)) { - if (dim_name == lon_dim) { - names(attribute_backup[['variables']][[var]][['dim']])[which(names(attribute_backup[['variables']][[var]][['dim']]) == lon_dim)] <- new_lon_name - } else { - names(attribute_backup[['variables']][[var]][['dim']])[which(names(attribute_backup[['variables']][[var]][['dim']]) == lat_dim)] <- new_lat_name - } - } - } - if (!is.null(dim_name)) { - if (dim_name %in% c(lon_dim, lat_dim)) { - if (dim_name == lon_dim) { - new_vals <- found_lons[TRUE] - } else if (dim_name == lat_dim) { - new_vals <- found_lats[TRUE] - } - if (!is.null(attribute_backup[['variables']][[var]][['dim']][[dim]][['len']])) { - attribute_backup[['variables']][[var]][['dim']][[dim]][['len']] <- length(new_vals) - } - if (!is.null(attribute_backup[['variables']][[var]][['dim']][[dim]][['vals']])) { - if (!result_is_irregular) { - 
attribute_backup[['variables']][[var]][['dim']][[dim]][['vals']] <- new_vals - } else { - attribute_backup[['variables']][[var]][['dim']][[dim]][['vals']] <- 1:length(new_vals) - } - } - } - } - } - } - if (!is_irregular && result_is_irregular) { - attribute_backup[['coordinates']] <- paste(lon_var_name, lat_var_name) - } else if (is_irregular && !result_is_irregular) { - attribute_backup[['coordinates']] <- NULL - } - } - } - attributes(result_array) <- attribute_backup - lons_attr_bk[['dim']] <- dim(found_lons) - if (!is.null(lons_attr_bk[['variables']]) && (length(lons_attr_bk[['variables']]) > 0)) { - for (var in 1:length(lons_attr_bk[['variables']])) { - if (length(lons_attr_bk[['variables']][[var]][['dim']]) > 0) { - dims_to_remove <- NULL - for (dim in 1:length(lons_attr_bk[['variables']][[var]][['dim']])) { - dim_name <- NULL - if ('name' %in% names(lons_attr_bk[['variables']][[var]][['dim']][[dim]])) { - dim_name <- lons_attr_bk[['variables']][[var]][['dim']][[dim]][['name']] - if (dim_name %in% c(lon_dim, lat_dim)) { - if (dim_name == lon_dim) { - lons_attr_bk[['variables']][[var]][['dim']][[dim]][['name']] <- new_lon_name - } else { - lons_attr_bk[['variables']][[var]][['dim']][[dim]][['name']] <- new_lat_name - } - } - } else if (!is.null(names(lons_attr_bk[['variables']][[var]][['dim']]))) { - dim_name <- names(lons_attr_bk[['variables']][[var]][['dim']])[dim] - if (dim_name %in% c(lon_dim, lat_dim)) { - if (dim_name == lon_dim) { - names(lons_attr_bk[['variables']][[var]][['dim']])[which(names(lons_attr_bk[['variables']][[var]][['dim']]) == lon_dim)] <- new_lon_name - } else { - names(lons_attr_bk[['variables']][[var]][['dim']])[which(names(lons_attr_bk[['variables']][[var]][['dim']]) == lat_dim)] <- new_lat_name - } - } - } - if (!is.null(dim_name)) { - if (dim_name %in% c(lon_dim, lat_dim)) { - if (dim_name == lon_dim) { - new_vals <- found_lons[TRUE] - } else if (dim_name == lat_dim) { - new_vals <- found_lats[TRUE] - if (!result_is_irregular) { - 
dims_to_remove <- c(dims_to_remove, dim) - } - } - if (!is.null(lons_attr_bk[['variables']][[var]][['dim']][[dim]][['len']])) { - lons_attr_bk[['variables']][[var]][['dim']][[dim]][['len']] <- length(new_vals) - } - if (!is.null(lons_attr_bk[['variables']][[var]][['dim']][[dim]][['vals']])) { - if (!result_is_irregular) { - lons_attr_bk[['variables']][[var]][['dim']][[dim]][['vals']] <- new_vals - } else { - lons_attr_bk[['variables']][[var]][['dim']][[dim]][['vals']] <- 1:length(new_vals) - } - } - } - } - } - if (length(dims_to_remove) > 1) { - lons_attr_bk[['variables']][[var]][['dim']] <- lons_attr_bk[['variables']][[var]][['dim']][[-dims_to_remove]] - } - } - } - names(lons_attr_bk[['variables']])[1] <- lon_var_name - lons_attr_bk[['variables']][[1]][['units']] <- 'degrees_east' - } - attributes(found_lons) <- lons_attr_bk - lats_attr_bk[['dim']] <- dim(found_lats) - if (!is.null(lats_attr_bk[['variables']]) && (length(lats_attr_bk[['variables']]) > 0)) { - for (var in 1:length(lats_attr_bk[['variables']])) { - if (length(lats_attr_bk[['variables']][[var]][['dim']]) > 0) { - dims_to_remove <- NULL - for (dim in 1:length(lats_attr_bk[['variables']][[var]][['dim']])) { - dim_name <- NULL - if ('name' %in% names(lats_attr_bk[['variables']][[var]][['dim']][[dim]])) { - dim_name <- lats_attr_bk[['variables']][[var]][['dim']][[dim]][['name']] - if (dim_name %in% c(lon_dim, lat_dim)) { - if (dim_name == lon_dim) { - lons_attr_bk[['variables']][[var]][['dim']][[dim]][['name']] <- new_lon_name - } else { - lons_attr_bk[['variables']][[var]][['dim']][[dim]][['name']] <- new_lat_name - } - } - } else if (!is.null(names(lats_attr_bk[['variables']][[var]][['dim']]))) { - dim_name <- names(lats_attr_bk[['variables']][[var]][['dim']])[dim] - if (dim_name %in% c(lon_dim, lat_dim)) { - if (dim_name == lon_dim) { - names(lats_attr_bk[['variables']][[var]][['dim']])[which(names(lats_attr_bk[['variables']][[var]][['dim']]) == lon_dim)] <- new_lon_name - } else { - 
names(lats_attr_bk[['variables']][[var]][['dim']])[which(names(lats_attr_bk[['variables']][[var]][['dim']]) == lat_dim)] <- new_lat_name - } - } - } - if (!is.null(dim_name)) { - if (dim_name %in% c(lon_dim, lat_dim)) { - if (dim_name == lon_dim) { - new_vals <- found_lons[TRUE] - if (!result_is_irregular) { - dims_to_remove <- c(dims_to_remove, dim) - } - } else if (dim_name == lat_dim) { - new_vals <- found_lats[TRUE] - } - if (!is.null(lats_attr_bk[['variables']][[var]][['dim']][[dim]][['len']])) { - lats_attr_bk[['variables']][[var]][['dim']][[dim]][['len']] <- length(new_vals) - } - if (!is.null(lats_attr_bk[['variables']][[var]][['dim']][[dim]][['vals']])) { - if (!result_is_irregular) { - lats_attr_bk[['variables']][[var]][['dim']][[dim]][['vals']] <- new_vals - } else { - lats_attr_bk[['variables']][[var]][['dim']][[dim]][['vals']] <- 1:length(new_vals) - } - } - } - } - } - if (length(dims_to_remove) > 1) { - lats_attr_bk[['variables']][[var]][['dim']] <- lats_attr_bk[['variables']][[var]][['dim']][[-dims_to_remove]] - } - } - } - names(lats_attr_bk[['variables']])[1] <- lat_var_name - lats_attr_bk[['variables']][[1]][['units']] <- 'degrees_north' - } - attributes(found_lats) <- lats_attr_bk - } - list(data_array = if (return_array) { - if (interpolation_needed) { - result_array - } else { - data_array - } - } else { - NULL - }, - lons = found_lons, lats = found_lats) -} diff --git a/R/Clim.R b/R/Clim.R index f8299a3e90a84824989f6fe650ced2f45843e6ac..1040e368f0c61b15a966639e5cfe2aeb52871396 100644 --- a/R/Clim.R +++ b/R/Clim.R @@ -51,16 +51,11 @@ #' dimension 'memb_dim' is also removed. #'} #' -#'@keywords datagen -#'@author History:\cr -#' 0.9 - 2011-03 (V. Guemas, \email{virginie.guemas@ic3.cat}) - Original code\cr -#' 1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to R CRAN -#' 3.0 - 2020-01 (A. 
Ho, \email{an.ho@bsc.es}) - Adopt multiApply feature #'@examples #'# Load sample data as in Load() example: #'example(Load) #'clim <- Clim(sampleData$mod, sampleData$obs) -#'clim2 <- Clim(sampleData$mod, sampleData$obs, method = 'kharin', memb = F) +#'clim2 <- Clim(sampleData$mod, sampleData$obs, method = 'kharin', memb = FALSE) #'\donttest{ #'PlotClim(clim$clim_exp, clim$clim_obs, #' toptitle = paste('sea surface temperature climatologies'), @@ -68,6 +63,7 @@ #' listobs = c('ERSST'), biglab = FALSE, fileout = 'tos_clim.eps') #'} #'@importFrom abind adrop +#'@importFrom ClimProjDiags Subset #'@import multiApply #'@export Clim <- function(exp, obs, time_dim = 'sdate', dat_dim = c('dataset', 'member'), diff --git a/R/ColorBar.R b/R/ColorBar.R index 49b82af81cd31748be23f9026b03a1c05151853f..206f68dc9c2fba5337f2adb21c77c31d3e582494 100644 --- a/R/ColorBar.R +++ b/R/ColorBar.R @@ -134,14 +134,6 @@ #' bar (NULL if not drawn at all). #'} #' -#'@keywords hplot -#'@author History:\cr -#' 0.1 - 2012-04 (V. Guemas, \email{virginie.guemas@@bsc.es}) - Original code\cr -#' 0.2 - 2013-04 (I. Andreu-Burillo, \email{isabel.andreu-burillo@@bsc.es}) - Vert option\cr -#' 1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@@bsc.es}) - Formatting to CRAN\cr -#' 1.1 - 2013-09 (C. Prodhomme, \email{chloe.prodhomme@@bsc.es}) - Add cex option\cr -#' 1.2 - 2016-08 (N. Manubens, \email{nicolau.manubens@@bsc.es}) - New ColorBar\cr -#' (V. 
Torralba, \email{veronica.torralba@@bsc.es})
 #'@examples
 #'cols <- c("dodgerblue4", "dodgerblue1", "forestgreen", "yellowgreen", "white",
 #'          "white", "yellow", "orange", "red", "saddlebrown")
diff --git a/R/ConfigApplyMatchingEntries.R b/R/ConfigApplyMatchingEntries.R
new file mode 100644
index 0000000000000000000000000000000000000000..21a304bb2bbd638335b0a552b091aaa806c51711
--- /dev/null
+++ b/R/ConfigApplyMatchingEntries.R
@@ -0,0 +1,155 @@
+#'Apply Matching Entries To Dataset Name And Variable Name To Find Related Info
+#'
+#'Given a pair of dataset name and variable name, this function applies
+#'all the matching entries found in the corresponding configuration
+#'table to work out the dataset main path, file path, actual name of variable
+#'inside NetCDF files, ...
+#'
+#'@param configuration Configuration object obtained from ConfigFileOpen()
+#'  or ConfigFileCreate().
+#'@param var Name of the variable to load. Will be interpreted as a string,
+#'  regular expressions do not apply here.
+#'  Examples: 'tas' or 'tasmax_q90'.
+#'@param exp Set of experimental dataset identifiers. Will be interpreted as
+#'  strings, regular expressions do not apply here. Can be NULL (not to
+#'  check in experimental dataset tables), and takes by default NULL.
+#'  Examples: c('EnsEcmwfSeas', 'EnsUkmoSeas'), c('i00k').
+#'@param obs Set of observational dataset identifiers. Will be interpreted as
+#'  strings, regular expressions do not apply here. Can be NULL (not to
+#'  check in observational dataset tables), and takes by default NULL.
+#'  Examples: c('GLORYS', 'ERAint'), c('NCEP').
+#'@param show_entries Flag to stipulate whether to show the found matching
+#'  entries for all datasets and variable name.
+#'@param show_result Flag to stipulate whether to show the result of applying
+#'  all the matching entries (dataset main path, file path, ...).
+#'
+#'@return A list with the information resulting of applying the matching
+#'  entries is returned.
+#'@seealso ConfigApplyMatchingEntries, ConfigEditDefinition, +#' ConfigEditEntry, ConfigFileOpen, ConfigShowSimilarEntries, +#' ConfigShowTable +#'@examples +#'# Create an empty configuration file +#'config_file <- paste0(tempdir(), "/example.conf") +#'s2dv::ConfigFileCreate(config_file, confirm = FALSE) +#'# Open it into a configuration object +#'configuration <- ConfigFileOpen(config_file) +#'# Add an entry at the bottom of 4th level of file-per-startdate experiments +#'# table which will associate the experiment "ExampleExperiment2" and variable +#'# "ExampleVariable" to some information about its location. +#'configuration <- ConfigAddEntry(configuration, "experiments", +#' "last", "ExampleExperiment2", "ExampleVariable", +#' "/path/to/ExampleExperiment2/", +#' "ExampleVariable/ExampleVariable_$START_DATE$.nc") +#'# Edit entry to generalize for any variable. Changing variable needs . +#'configuration <- ConfigEditEntry(configuration, "experiments", 1, +#' var_name = ".*", +#' file_path = "$VAR_NAME$/$VAR_NAME$_$START_DATE$.nc") +#'# Now apply matching entries for variable and experiment name and show the +#'# result +#'match_info <- ConfigApplyMatchingEntries(configuration, 'tas', +#' exp = c('ExampleExperiment2'), show_result = TRUE) +#'@export +ConfigApplyMatchingEntries <- function(configuration, var, exp = NULL, obs = NULL, show_entries = FALSE, show_result = TRUE) { + ## Function to tell if a regexpr() match is a complete match to a specified name + isFullMatch <- function(x, name) { + ifelse(x > 0 && attributes(x)$match.length == nchar(name), TRUE, FALSE) + } + + var_entries_in_exps <- c() + if (length(unlist(configuration$experiments, recursive = FALSE)) > 0) { + var_entries_in_exps <- which(unlist(lapply(lapply(lapply(as.list(unlist(lapply(configuration$experiments, lapply, "[[", 2))), .ConfigReplaceVariablesInString, configuration$definitions), regexpr, var), isFullMatch, var) > 0)) + } + var_entries_in_obs <- c() + if 
(length(unlist(configuration$observations, recursive = FALSE)) > 0) { + var_entries_in_obs <- which(unlist(lapply(lapply(lapply(as.list(unlist(lapply(configuration$observations, lapply, "[[", 2))), .ConfigReplaceVariablesInString, configuration$definitions), regexpr, var), isFullMatch, var) > 0)) + } + + exp_info <- list() + jmod <- 1 + for (mod in exp) { + mod_var_matching_entries <- mod_var_matching_indices <- mod_var_matching_entries_levels <- c() + + if (length(unlist(configuration$experiments, recursive = FALSE)) > 0) { + mod_entries_in_exps <- which(unlist(lapply(lapply(lapply(unlist(lapply(configuration$experiments, lapply, "[[", 1), recursive = FALSE), .ConfigReplaceVariablesInString, configuration$definitions), regexpr, mod), isFullMatch, mod))) + if (length(mod_entries_in_exps) > 0) { + mod_var_matching_indices <- intersect(var_entries_in_exps, mod_entries_in_exps) + mod_var_matching_entries <- unlist(configuration$experiments, recursive = FALSE)[mod_var_matching_indices] + exps_levels <- lapply(as.list(1:4), f <- function(x) {x <- array(x, length(configuration$experiments[[x]]))}) + mod_var_matching_entries_levels <- unlist(exps_levels)[intersect(var_entries_in_exps, mod_entries_in_exps)] + } + } + + if (length(mod_var_matching_entries) == 0) { + stop(paste('Error: There are no matching entries in the configuration file for the experiment', mod, 'and the variable', var, + '. 
Please check the configuration file.'))
+    } else {
+      if (show_entries) {
+        header <- paste0("# Matching entries for experiment '", exp[jmod], "' and variable '", var, "' #")
+        .message(paste(rep("#", nchar(header) - 1), collapse = ''))
+        .message(header)
+        .message(paste(rep("#", nchar(header) - 1), collapse = ''))
+        ConfigShowTable(list(experiments = list(mod_var_matching_entries)), 'experiments', mod_var_matching_indices)
+        cat("\n")
+      }
+      result <- .ConfigGetDatasetInfo(mod_var_matching_entries, 'experiments')
+      if (show_result) {
+        .message(paste0("The result of applying the matching entries to experiment name '", exp[jmod], "' and variable name '", var, "' is:"))
+        configuration$definitions[["VAR_NAME"]] <- var
+        configuration$definitions[["EXP_NAME"]] <- exp[jmod]
+        fields <- c("MAIN_PATH: ", "FILE_PATH: ", "NC_VAR_NAME: ", "SUFFIX: ", "VAR_MIN: ", "VAR_MAX: ")
+        values <- lapply(result, lapply, function (x) .ConfigReplaceVariablesInString(x, configuration$definitions, TRUE))
+        lapply(paste0(fields, unlist(values), "\n"), cat)
+        cat("\n")
+      }
+      exp_info <- c(exp_info, list(result))
+    }
+
+    jmod <- jmod + 1
+  }
+
+  obs_info <- list()
+  jobs <- 1
+  for (ref in obs) {
+    ref_var_matching_entries <- ref_var_matching_indices <- ref_var_matching_entries_levels <- c()
+
+    if (length(unlist(configuration$observations, recursive = FALSE)) > 0) {
+      ref_entries_in_obs <- which(unlist(lapply(lapply(lapply(unlist(lapply(configuration$observations, lapply, "[[", 1), recursive = FALSE), .ConfigReplaceVariablesInString, configuration$definitions), regexpr, ref), isFullMatch, ref)))
+      if (length(ref_entries_in_obs) > 0) {
+        ref_var_matching_indices <- intersect(var_entries_in_obs, ref_entries_in_obs)
+        ref_var_matching_entries <- unlist(configuration$observations, recursive = FALSE)[ref_var_matching_indices]
+        obs_levels <- lapply(as.list(1:4), f <- function(x) {x <- array(x, length(configuration$observations[[x]]))})
+        ref_var_matching_entries_levels <-
unlist(obs_levels)[intersect(var_entries_in_obs, ref_entries_in_obs)]
+      }
+    }
+
+    if (length(ref_var_matching_entries) == 0) {
+      stop(paste('Error: There are no matching entries in the configuration file for the observation', ref, 'and the variable', var,
+                 '. Please check the configuration file.'))
+    } else {
+      if (show_entries) {
+        header <- paste0("# Matching entries for observation '", obs[jobs], "' and variable '", var, "' #")
+        .message(paste(rep("#", nchar(header) - 1), collapse = ''))
+        .message(header)
+        .message(paste(rep("#", nchar(header) - 1), collapse = ''))
+        ConfigShowTable(list(observations = list(ref_var_matching_entries)), 'observations', ref_var_matching_indices)
+        cat("\n")
+      }
+      result <- .ConfigGetDatasetInfo(ref_var_matching_entries, 'observations')
+      if (show_result) {
+        .message(paste0("The result of applying the matching entries to observation name '", obs[jobs], "' and variable name '", var, "' is:"))
+        configuration$definitions[['VAR_NAME']] <- var
+        configuration$definitions[["OBS_NAME"]] <- obs[jobs]
+        fields <- c("MAIN_PATH: ", "FILE_PATH: ", "NC_VAR_NAME: ", "SUFFIX: ", "VAR_MIN: ", "VAR_MAX: ")
+        values <- lapply(result, lapply, function (x) .ConfigReplaceVariablesInString(x, configuration$definitions, TRUE))
+        lapply(paste0(fields, unlist(values), "\n"), cat)
+        cat("\n")
+      }
+      obs_info <- c(obs_info, list(result))
+    }
+
+    jobs <- jobs + 1
+  }
+
+  invisible(list(exp_info = exp_info, obs_info = obs_info))
+}
diff --git a/R/ConfigEditDefinition.R b/R/ConfigEditDefinition.R
new file mode 100644
index 0000000000000000000000000000000000000000..e0cf1a0da01ac06e3ccccd6e9c95a25d77db426a
--- /dev/null
+++ b/R/ConfigEditDefinition.R
@@ -0,0 +1,65 @@
+#'Add, Modify Or Remove Variable Definitions In Configuration
+#'
+#'These functions help in adding, modifying or removing variable definitions
+#'in a configuration object obtained with \code{\link{ConfigFileOpen}} or
+#'\code{\link{ConfigFileCreate}}.
ConfigEditDefinition() will add the
+#'definition if not existing.
+#'
+#'@param configuration Configuration object obtained with ConfigFileOpen() or
+#'  ConfigFileCreate().
+#'@param name Name of the variable to add/modify/remove.
+#'@param value Value to associate to the variable.
+#'@param confirm Flag to stipulate whether to ask for confirmation if the
+#'  variable is being modified. Takes by default TRUE.
+#'
+#'@return A modified configuration object is returned.
+#'@seealso [ConfigApplyMatchingEntries()], [ConfigEditDefinition()],
+#'  [ConfigEditEntry()], [ConfigFileOpen()], [ConfigShowSimilarEntries()],
+#'  [ConfigShowTable()].
+#'@examples
+#'# Create an empty configuration file
+#'config_file <- paste0(tempdir(), "/example.conf")
+#'ConfigFileCreate(config_file, confirm = FALSE)
+#'# Open it into a configuration object
+#'configuration <- ConfigFileOpen(config_file)
+#'# Add an entry at the bottom of 4th level of file-per-startdate experiments
+#'# table which will associate the experiment "ExampleExperiment2" and variable
+#'# "ExampleVariable" to some information about its location.
+#'configuration <- ConfigAddEntry(configuration, "experiments",
+#'                 "last", "ExampleExperiment2", "ExampleVariable",
+#'                 "/path/to/ExampleExperiment2/",
+#'                 "ExampleVariable/ExampleVariable_$START_DATE$.nc")
+#'# Edit entry to generalize for any variable. Changing variable needs a regular expression.
+#'configuration <- ConfigEditEntry(configuration, "experiments", 1, +#' var_name = ".*", +#' file_path = "$VAR_NAME$/$VAR_NAME$_$START_DATE$.nc") +#'# Now apply matching entries for variable and experiment name and show the +#'# result +#'match_info <- ConfigApplyMatchingEntries(configuration, 'tas', +#' exp = c('ExampleExperiment2'), show_result = TRUE) +#' +#'@rdname ConfigEditDefinition +#'@export +ConfigEditDefinition <- function(configuration, name, value, confirm = TRUE) { + continue <- TRUE + if (name %in% names(configuration$definitions)) { + if (confirm) { + while (continue != 'y' && continue != 'n') { + continue <- readline("WARNING: The definition already exists. It will be replaced. Continue? (y/n)\n") + } + continue <- ifelse(continue == 'y', TRUE, FALSE) + } + } + if (continue) { + configuration$definitions[[name]] <- value + } + + configuration +} +#'@rdname ConfigEditDefinition +#'@export +ConfigRemoveDefinition <- function(configuration, name) { + configuration$definitions[[name]] <- NULL + + configuration +} diff --git a/R/ConfigEditEntry.R b/R/ConfigEditEntry.R new file mode 100644 index 0000000000000000000000000000000000000000..d625c1a65507ce0ea48b8bfff8ff175d0326825a --- /dev/null +++ b/R/ConfigEditEntry.R @@ -0,0 +1,204 @@ +#'Add, Remove Or Edit Entries In The Configuration +#' +#'ConfigAddEntry(), ConfigEditEntry() and ConfigRemoveEntry() are functions +#'to manage entries in a configuration object created with ConfigFileOpen().\cr +#'Before adding an entry, make sure the defaults don't do already what you +#'want (ConfigShowDefinitions(), ConfigShowTable()).\cr +#'Before adding an entry, make sure it doesn't override and spoil what other +#'entries do (ConfigShowTable(), ConfigFileOpen()).\cr +#'Before adding an entry, make sure there aren't other entries that already +#'do what you want (ConfigShowSimilarEntries()). 
+#' +#'@param configuration Configuration object obtained via ConfigFileOpen() +#' or ConfigFileCreate() that will be modified accordingly. +#'@param dataset_type Whether to modify a table of experimental datasets or +#' a table of observational datasets. Can take values 'experiments' or +#' 'observations' respectively. +#'@param position 'position' tells the index in the table of the entry to +#' edit or remove. Use ConfigShowTable() to see the index of the entry. +#' In ConfigAddEntry() it can also take the value "last" (default), that will +#' put the entry at the end of the corresponding level, or "first" at the +#' beginning. See ?ConfigFileOpen for more information. +#' If 'dataset_name' and 'var_name' are specified this argument is ignored in +#' ConfigRemoveEntry(). +#'@param dataset_name,var_name,main_path,file_path,nc_var_name,suffix,varmin,varmax +#' These parameters tell the dataset name, variable name, main path, ..., of +#' the entry to add, edit or remove.\cr 'dataset_name' and 'var_name' can take +#' as a value a POSIX 1003.2 regular expression (see ?ConfigFileOpen).\cr +#' Other parameters can take as a value a shell globbing expression +#' (see ?ConfigFileOpen).\cr +#' 'dataset_name' and 'var_name' take by default the regular expression '.*' +#' (match any dataset and variable name), and the others take by default '*' +#' (associate to the pair 'dataset_name' and 'var_name' all the defined +#' default values. In this case '*' has a special behaviour, it won't be +#' used as a shell globbing expression. See ?ConfigFileOpen and +#' ?ConfigShowDefinitions).\cr +#' 'var_min' and 'var_max' must be a character string.\cr +#' To define these values, you can use defined variables via $VARIABLE_NAME$ +#' or other entry attributes via $ATTRIBUTE_NAME$. See ?ConfigFileOpen for +#' more information. +#' +#'@return The function returns an accordingly modified configuration object. 
+#' To apply the changes in the configuration file it must be saved using +#' ConfigFileSave(). +#' +#'@seealso ConfigApplyMatchingEntries, ConfigEditDefinition, ConfigEditEntry, +#' ConfigFileOpen, ConfigShowSimilarEntries, ConfigShowTable +#'@examples +#'# Create an empty configuration file +#'config_file <- paste0(tempdir(), "/example.conf") +#'ConfigFileCreate(config_file, confirm = FALSE) +#'# Open it into a configuration object +#'configuration <- ConfigFileOpen(config_file) +#'# Add an entry at the bottom of 4th level of file-per-startdate experiments +#'# table which will associate the experiment "ExampleExperiment" and variable +#'# "ExampleVariable" to some information about its location. +#'configuration <- ConfigAddEntry(configuration, "experiments", +#' "last", "ExampleExperiment", "ExampleVariable", +#' "/path/to/ExampleExperiment/", +#' "ExampleVariable/ExampleVariable_$START_DATE$.nc") +#'# Add another entry +#'configuration <- ConfigAddEntry(configuration, "experiments", +#' "last", "ExampleExperiment2", "ExampleVariable", +#' "/path/to/ExampleExperiment2/", +#' "ExampleVariable/ExampleVariable_$START_DATE$.nc") +#'# Edit second entry to generalize for any variable. Changing variable needs . 
+#'configuration <- ConfigEditEntry(configuration, "experiments", 2, +#' var_name = ".*", +#' file_path = "$VAR_NAME$/$VAR_NAME$_$START_DATE$.nc") +#'# Remove first entry +#'configuration <- ConfigRemoveEntry(configuration, "experiments", +#' "ExampleExperiment", "ExampleVariable") +#'# Show results +#'ConfigShowTable(configuration, "experiments") +#'# Save the configuration +#'ConfigFileSave(configuration, config_file, confirm = FALSE) +#'@rdname ConfigEditEntry +#'@export +ConfigEditEntry <- function(configuration, dataset_type, position, dataset_name = NULL, var_name = NULL, main_path = NULL, file_path = NULL, nc_var_name = NULL, suffix = NULL, varmin = NULL, varmax = NULL) { + if (!(dataset_type %in% c('experiments', 'observations'))) { + stop("Error: 'dataset_type' must be one of 'experiments' or 'observations'") + } + + table_name <- dataset_type + + all_entries <- length(unlist(configuration[[table_name]], recursive = FALSE)) + if (position < 1 || position > all_entries) { + stop("Error: 'position' must be in the range [1, # of table entries]") + } + + found <- FALSE + level <- 1 + index_of_first <- 1 + while (!found && level < 5) { + if (position <= (index_of_first + length(configuration[[table_name]][[level]]) - 1)) { + found <- TRUE + } else { + index_of_first <- index_of_first + length(configuration[[table_name]][[level]]) + level <- level + 1 + } + } + position <- position - index_of_first + 1 + + edited_values <- c(1:8)[c(!is.null(dataset_name), !is.null(var_name), !is.null(main_path), !is.null(file_path), !is.null(nc_var_name), !is.null(suffix), !is.null(varmin), !is.null(varmax))] + configuration[[table_name]][[level]][[position]][edited_values] <- c(dataset_name, var_name, main_path, file_path, nc_var_name, suffix, varmin, varmax) + + configuration +} +#'@rdname ConfigEditEntry +#'@export +ConfigAddEntry <- function(configuration, dataset_type, position = 'last', dataset_name = ".*", var_name = ".*", main_path = "*", file_path = "*", nc_var_name = 
"*", suffix = "*", varmin = "*", varmax = "*") { + table_name <- dataset_type + if (dataset_name == ".*") { + if (var_name == ".*") { + level <- 1 + } else { + level <- 3 + } + } else { + if (var_name == ".*") { + level <- 2 + } else { + level <- 4 + } + } + + index_of_first <- 0 + index_of_last <- 0 + for (i in 1:level) { + index_of_first <- index_of_first + ifelse(i == 1, 1, length(configuration[[table_name]][[i - 1]])) + index_of_last <- index_of_last + length(configuration[[table_name]][[i]]) + } + + if (position == 'last') { + position <- index_of_last - index_of_first + 1 + 1 + } else if (position == 'first') { + position <- 1 + } else { + if (position < index_of_first || position > index_of_last + 1) { + stop("'position' must be in the range [index of first table entry in corresponding level, index of last table entry in corresponding level + 1]") + } + position <- position - index_of_first + 1 + } + + if (dataset_type == 'experiments' || dataset_type == 'observations') { + configuration[[table_name]][[level]] <- append(configuration[[table_name]][[level]], list(c(dataset_name, var_name, main_path, file_path, nc_var_name, suffix, varmin, varmax)), after = position - 1) + } else { + stop("'dataset_type' must be one of 'experiments' or 'observations'") + } + + configuration +} +#'@rdname ConfigEditEntry +#'@export +ConfigRemoveEntry <- function(configuration, dataset_type, dataset_name = NULL, var_name = NULL, position = NULL) { + table_name <- dataset_type + if (!is.null(dataset_name) && !is.null(var_name)) { + if (dataset_name == ".*") { + if (var_name == ".*") { + level <- 1 + } else { + level <- 3 + } + } else { + if (var_name == ".*") { + level <- 2 + } else { + level <- 4 + } + } + + position <- which(unlist(lapply(configuration[[table_name]][[level]], "[", 1)) == dataset_name & + unlist(lapply(configuration[[table_name]][[level]], "[", 2)) == var_name)[1] + if (is.na(position)) { + stop("No entry found that matches 'dataset_name' and 'var_name'.") + } + 
} else { + if (is.null(position)) { + stop("At least ('dataset_name', 'var_name') or 'position' must be specified.") + } + + all_entries <- length(unlist(configuration[[table_name]], recursive = FALSE)) + if (position < 1 || position > all_entries) { + stop("'position' must be in the range [1, # of table entries]") + } + + found <- FALSE + level <- 1 + index_of_first <- 1 + while (!found && level < 5) { + if (position <= (index_of_first + length(configuration[[table_name]][[level]]) - 1)) { + found <- TRUE + } else { + index_of_first <- index_of_first + length(configuration[[table_name]][[level]]) + level <- level + 1 + } + } + position <- position - index_of_first + 1 + } + + configuration[[table_name]][[level]][[position]] <- NULL + + configuration +} + diff --git a/R/ConfigFileOpen.R b/R/ConfigFileOpen.R new file mode 100644 index 0000000000000000000000000000000000000000..c442f1f0c5e7a1b39717309cb6736067e5163c8e --- /dev/null +++ b/R/ConfigFileOpen.R @@ -0,0 +1,391 @@ +#'Functions To Create Open And Save Configuration File +#' +#'These functions help in creating, opening and saving configuration files. +#' +#'@param file_path Path to the configuration file to create/open/save. +#'@param silent Flag to activate or deactivate verbose mode. +#' Defaults to FALSE (verbose mode on). +#'@param configuration Configuration object to save in a file. +#'@param confirm Flag to stipulate whether to ask for confirmation when +#' saving a configuration file that already exists.\cr +#' Defaults to TRUE (confirmation asked). +#'@param stop TRUE/FALSE whether to raise an error if not all the mandatory +#' default variables are defined in the configuration file. +#' +#'@details +#'ConfigFileOpen() loads all the data contained in the configuration file +#'specified as parameter 'file_path'. +#'Returns a configuration object with the variables needed for the +#'configuration file mechanism to work. 
+#'This function is called from inside the Load() function to load the +#'configuration file specified in 'configfile'.\cr\cr +#'ConfigFileCreate() creates an empty configuration file and saves it to +#'the specified path. It may be opened later with ConfigFileOpen() to be edited. +#' Some default values are set when creating a file with this function, you +#'can check these with ConfigShowDefinitions().\cr\cr +#'ConfigFileSave() saves a configuration object into a file, which may then +#'be used from Load().\cr\cr +#'Two examples of configuration files can be found inside the 'inst/config/' +#'folder in the package: +#' \itemize{ +#' \item{BSC.conf: configuration file used at BSC-CNS. Contains location +#' data on several datasets and variables.} +#' \item{template.conf: very simple configuration file intended to be used as +#' pattern when starting from scratch.} +#' } +#'How the configuration file works:\cr +#'~~~~~~~~~~~~~~~~~~~~~~~~~~~~\cr +#'It contains one list and two tables.\cr +#'Each of these have a header that starts with '!!'. These are key lines and +#'should not be removed or reordered.\cr +#'Lines starting with '#' and blank lines will be ignored. 
+#'The list should contain variable definitions and default value definitions.\cr +#'The first table contains information about experiments.\cr +#'The second table contains information about observations.\cr +#'Each table entry is a list of comma-separated elements.\cr +#'The two first are part of a key that is associated to a value formed by the +#'other elements.\cr +#'The key elements are a dataset identifier and a variable name.\cr +#'The value elements are the dataset main path, dataset file path, the +#'variable name inside the .nc file, a default suffix (explained below) and the +#'minimum and maximum values beyond which loaded data is deactivated.\cr +#'Given a dataset name and a variable name, a full path is obtained +#'concatenating the main path and the file path.\cr +#'Also the nc variable name, the suffixes and the limit values are obtained.\cr +#'Any of the elements in the keys can contain regular expressions[1] that will +#'cause matching for sets of dataset names or variable names.\cr +#'The dataset path and file path can contain shell globbing expressions[2] +#'that will cause matching for sets of paths when fetching the file in the +#'full path.\cr +#'The full path can point to an OPeNDAP URL.\cr +#'Any of the elements in the value can contain variables that will be replaced +#'by an associated string.\cr +#'Variables can be defined only in the list at the top of the file.
\cr +#'The pattern of a variable definition is\cr +#'VARIABLE_NAME = VARIABLE_VALUE\cr +#'and can be accessed from within the table values or from within the variable +#'values as\cr +#' $VARIABLE_NAME$\cr +#'For example:\cr +#' FILE_NAME = tos.nc\cr +#' !!table of experiments\cr +#' ecmwf, tos, /path/to/dataset/, $FILE_NAME$\cr +#'There are some reserved variables that will offer information about the +#'store frequency, the current startdate Load() is fetching, etc:\cr +#' $VAR_NAME$, $START_DATE$, $STORE_FREQ$, $MEMBER_NUMBER$\cr +#' for experiments only: $EXP_NAME$\cr +#' for observations only: $OBS_NAME$, $YEAR$, $MONTH$, $DAY$\cr +#'Additionally, from an element in an entry value you can access the other +#'elements of the entry as:\cr +#' $EXP_MAIN_PATH$, $EXP_FILE_PATH$, \cr$VAR_NAME$, $SUFFIX$, $VAR_MIN$, $VAR_MAX$\cr +#'\cr +#'The variable $SUFFIX$ is useful because it can be used to take part in the +#'main or file path. For example: '/path/to$SUFFIX$/dataset/'.\cr +#'It will be replaced by the value in the column that corresponds to the +#'suffix unless the user specifies a different suffix via the parameter +#''suffixexp' or 'suffixobs'.\cr +#'This way the user is able to load two variables with the same name in the +#'same dataset but with slight modifications, with a suffix anywhere in the +#'path to the data that advices of this slight modification.\cr\cr +#'The entries in a table will be grouped in 4 levels of specificity: +#' \enumerate{ +#' \item{ +#'General entries:\cr +#' - the key dataset name and variable name are both a regular expression +#'matching any sequence of characters (.*) that will cause matching for any +#'pair of dataset and variable names\cr +#' Example: .*, .*, /dataset/main/path/, file/path, nc_var_name, suffix, +#'var_min, var_max +#' } +#' \item{ +#'Dataset entries:\cr +#' - the key variable name matches any sequence of characters\cr +#' Example: ecmwf, .*, /dataset/main/path/, file/path, nc_var_name, +#' suffix, var_min, 
var_max +#' } +#' \item{ +#'Variable entries:\cr +#' - the key dataset name matches any sequence of characters\cr +#' Example: .*, tos, /dataset/main/path/, file/path, nc_var_name, +#' suffix, var_min, var_max +#' } +#' \item{ +#' Specific entries:\cr +#' - both key values are specified\cr +#' Example: ecmwf, tos, /dataset/main/path/, file/path, nc_var_name, +#' suffix, var_min, var_max +#' } +#' } +#'Given a pair of dataset name and variable name for which we want to know the +#'full path, all the rules that match will be applied from more general to +#'more specific.\cr +#'If there is more than one entry per group that match a given key pair, +#'these will be applied in the order of appearance in the configuration file +#'(top to bottom).\cr\cr +#'An asterisk (*) in any value element will be interpreted as 'leave it as is +#'or take the default value if yet not defined'.\cr +#'The default values are defined in the following reserved variables:\cr +#' $DEFAULT_EXP_MAIN_PATH$, $DEFAULT_EXP_FILE_PATH$, $DEFAULT_NC_VAR_NAME$, +#'$DEFAULT_OBS_MAIN_PATH$, $DEFAULT_OBS_FILE_PATH$, $DEFAULT_SUFFIX$, +#'$DEFAULT_VAR_MIN$, $DEFAULT_VAR_MAX$, \cr +#'$DEFAULT_DIM_NAME_LATITUDES$, $DEFAULT_DIM_NAME_LONGITUDES$, \cr +#'$DEFAULT_DIM_NAME_MEMBERS$\cr\cr +#'Trailing asterisks in an entry are not mandatory. For example\cr +#' ecmwf, .*, /dataset/main/path/, *, *, *, *, *\cr +#'will have the same effect as\cr +#' ecmwf, .*, /dataset/main/path/ \cr\cr +#'A double quote only (") in any key or value element will be interpreted as +#''fill in with the same value as the entry above'. +#' +#'@return +#'ConfigFileOpen() returns a configuration object with all the information for +#' the configuration file mechanism to work.\cr +#'ConfigFileSave() returns TRUE if the file has been saved and FALSE otherwise.\cr +#'ConfigFileCreate() returns nothing. 
+#' +#'@seealso ConfigApplyMatchingEntries, ConfigEditDefinition, +#' ConfigEditEntry, ConfigFileOpen, ConfigShowSimilarEntries, ConfigShowTable +#'@references +#'[1] \url{https://stat.ethz.ch/R-manual/R-devel/library/base/html/regex.html}\cr +#'[2] \url{http://tldp.org/LDP/abs/html/globbingref.html} +#'@examples +#'# Create an empty configuration file +#'config_file <- paste0(tempdir(), "/example.conf") +#'ConfigFileCreate(config_file, confirm = FALSE) +#'# Open it into a configuration object +#'configuration <- ConfigFileOpen(config_file) +#'# Add an entry at the bottom of 4th level of file-per-startdate experiments +#'# table which will associate the experiment "ExampleExperiment2" and variable +#'# "ExampleVariable" to some information about its location. +#'configuration <- ConfigAddEntry(configuration, "experiments", +#' "last", "ExampleExperiment2", "ExampleVariable", +#' "/path/to/ExampleExperiment2/", +#' "ExampleVariable/ExampleVariable_$START_DATE$.nc") +#'# Edit entry to generalize for any variable. Changing variable needs . +#'configuration <- ConfigEditEntry(configuration, "experiments", 1, +#' var_name = ".*", +#' file_path = "$VAR_NAME$/$VAR_NAME$_$START_DATE$.nc") +#'# Now apply matching entries for variable and experiment name and show the +#'# result +#'match_info <- ConfigApplyMatchingEntries(configuration, 'tas', +#' exp = c('ExampleExperiment2'), show_result = TRUE) +#'# Finally save the configuration file. +#'ConfigFileSave(configuration, config_file, confirm = FALSE) +#' +#'@rdname ConfigFileOpen +#'@export +ConfigFileOpen <- function(file_path, silent = FALSE, stop = FALSE) { + if (!silent) { + .message(paste("Reading configuration file:", file_path)) + } + # Read the data from the configuration file. + ## Remove comments, tabulations, spaces, empty lines, ... 
+ all_lines <- readLines(file_path) + all_lines <- gsub("\t", "", all_lines) + all_lines <- gsub(" ", "", all_lines) + all_lines <- all_lines[-grep("^#", all_lines)] + all_lines <- all_lines[-grep("^$", all_lines)] + ## Detect key lines + key_positions <- grep("^!!", all_lines) + + ## Check that the format of the configuration file is right. + if (length(key_positions) != 3) { + stop('Error: The configuration file is corrupted or outdated: the key lines do not match the expected pattern.') + } + + ## Start parsing the configuration. + # The variables that are used in the configuration filed are kept in + # 'definitions', an associative array (key-value array or dictionary). + definitions <- list() + ## Parse the variables definitions in the whole configuration file + if (key_positions[1] + 1 < key_positions[2]) { + all_definitions <- all_lines[(key_positions[1] + 1):(key_positions[2] - 1)] + } else { + all_definitions <- c() + } + if (length(grep("=", all_definitions)) == length(all_definitions)) { + for (definition in all_definitions) { + if (length(which(strsplit(definition, "")[[1]] == "=")) == 1) { + var_name <- strsplit(definition, "=")[[1]][1] + tmp_value <- strsplit(definition, "=")[[1]][2] + var_value <- ifelse(is.na(tmp_value), "", tmp_value) + if ((length(which(strsplit(var_value, "")[[1]] == "$")) %% 2) == 0) { + definitions[[var_name]] <- var_value + } else { + stop('Error: The configuration file is corrupted: there are incorrect variable definition lines in the definition zone. 
A closing "$" symbol may be missing.') + } + } else { + stop('Error: The configuration file is corrupted: there are incorrect definition lines in the definition zone.') + } + } + } else { + stop('Error: The configuration file is corrupted: there are malformed definition lines in the definition zone.') + } + mandatory_definitions <- c("DEFAULT_EXP_MAIN_PATH", "DEFAULT_EXP_FILE_PATH", + "DEFAULT_NC_VAR_NAME", "DEFAULT_SUFFIX", "DEFAULT_VAR_MIN", + "DEFAULT_VAR_MAX", "DEFAULT_OBS_MAIN_PATH", + "DEFAULT_OBS_FILE_PATH", "DEFAULT_DIM_NAME_LONGITUDES", + "DEFAULT_DIM_NAME_LATITUDES", "DEFAULT_DIM_NAME_MEMBERS") + if (any(!(mandatory_definitions %in% names(definitions)))) { + .warning("Some of the mandatory variables below are not defined in the configuration file. You can add them with ConfigFileOpen(), ConfigEditDefinition() and ConfigFileSave() or by editing the configuration file by hand, as specified in ?ConfigFileOpen.") + if (stop) { + stop(paste(mandatory_definitions, collapse = ', ')) + } else { + .warning(paste(mandatory_definitions, collapse = ', ')) + } + } + + # Parse the entries in the tables + ## These are the indices of the key positions in the vector of key positions + tables_key_positions <- c(2, 3) + current_table <- 1 + for (table_key_position in tables_key_positions) { + datasets <- list(c(), c(), c(), c()) + + if (table_key_position == 2) { + id <- 'EXP' + } else { + id <- 'OBS' + } + default_values <- c(paste0("$DEFAULT_", id, "_MAIN_PATH$"), paste0("$DEFAULT_", id, "_FILE_PATH$"), "$DEFAULT_NC_VAR_NAME$", '$DEFAULT_SUFFIX$', '$DEFAULT_VAR_MIN$', '$DEFAULT_VAR_MAX$') + previous_values <- c(".*", ".*", default_values) + table_lines <- c() + table_end <- ifelse(table_key_position == max(tables_key_positions), length(all_lines), key_positions[table_key_position + 1] - 1) + if ((key_positions[table_key_position] + 1) <= table_end) { + table_lines <- all_lines[(key_positions[table_key_position] + 1):table_end] + table_lines <- strsplit(table_lines, ",") + 
} + + current_line <- 1 + for (entry in table_lines) { + if (entry[1] == '"') { + entry[1] <- previous_values[1] + } + if ((length(entry) > 1)) { + if (entry[2] == '"') { + entry[2] <- previous_values[2] + } + } else { + stop('Error: The variable column must be defined in all the entries in the tables in the configuration file.') + } + if (length(entry) > length(default_values) + 2) { + stop(paste0("Error: More elements than expected in the entry ", current_line, " in the configuration file.")) + } + for (value_position in 1:length(default_values)) { + if ((length(entry) > value_position + 1)) { + if (entry[value_position + 2] == '"') { + entry[value_position + 2] <- previous_values[value_position + 2] + } + } else { + entry[value_position + 2] <- '*' + } + } + if (entry[1] == '.*') { + if (entry[2] == '.*') { + datasets[[1]] <- c(datasets[[1]], list(entry)) + } else { + datasets[[3]] <- c(datasets[[3]], list(entry)) + } + } else { + if (entry[2] == '.*') { + datasets[[2]] <- c(datasets[[2]], list(entry)) + } else { + datasets[[4]] <- c(datasets[[4]], list(entry)) + } + } + current_line <- current_line + 1 + previous_values <- entry + } + + if (current_table == 1) { + exps <- datasets + } else if (current_table == 2) { + obs <- datasets + } + + current_table <- current_table + 1 + } + + if (!silent) { + .message("Config file read successfully.") + } + + invisible(list(definitions = definitions, + experiments = exps, + observations = obs)) +} + +#'@rdname ConfigFileOpen +#'@export +ConfigFileCreate <- function(file_path, confirm = TRUE) { + success <- ConfigFileSave(list(definitions = list( + DEFAULT_EXP_MAIN_PATH = "$EXP_NAME$", + DEFAULT_EXP_FILE_PATH = "$STORE_FREQ$/$VAR_NAME$_$START_DATE$.nc", + DEFAULT_NC_VAR_NAME = "$VAR_NAME$", + DEFAULT_SUFFIX = "", DEFAULT_VAR_MIN = "", + DEFAULT_VAR_MAX = "", DEFAULT_OBS_MAIN_PATH = "$OBS_NAME$", + DEFAULT_OBS_FILE_PATH = "$STORE_FREQ$/$VAR_NAME$_$YEAR$$MONTH$.nc", + DEFAULT_DIM_NAME_LONGITUDES = "longitude", 
DEFAULT_DIM_NAME_LATITUDES = "latitude", + DEFAULT_DIM_NAME_MEMBERS = "ensemble")), file_path, confirm = confirm) + if (success) { + .warning("You have just created an empty configuration file. You can edit it with ConfigAddEntry(). You can edit the defaults according to your needs with the functions ConfigFileOpen(), ConfigEditDefinition() and ConfigFileSave() or edit the file manually as specified in ?ConfigFileOpen.") + } +} + +#'@rdname ConfigFileOpen +#'@export +ConfigFileSave <- function(configuration, file_path, confirm = TRUE) { + continue <- TRUE + if (file.exists(file_path)) { + if (confirm) { + while (continue != 'y' && continue != 'n') { + continue <- readline(paste0("WARNING: The configuration file '", file_path, "' already exists. It will be replaced. Continue? (y/n)\n")) + } + continue <- ifelse(continue == 'y', TRUE, FALSE) + } + } + if (continue) { + file_conn <- file(file_path) + file_text <- c( +"# s2dv configuration file", +"#", +"# Check ?ConfigFileOpen after loading s2dv for detailed ", +"# documentation on this configuration file.", +"" + ) + + file_text <- c(file_text, + paste(rep("#", nchar("definitions") + 2), collapse = ''), + paste0("!!definitions"), + paste(rep("#", nchar("definitions") + 2), collapse = '') + ) + defaults <- configuration$definitions[grep("^DEFAULT_", names(configuration$definitions))] + definitions <- configuration$definitions[-grep("^DEFAULT_", names(configuration$definitions))] + file_text <- c(file_text, as.vector(paste(names(defaults), unlist(defaults), sep = " = "))) + file_text <- c(file_text, as.vector(paste(names(definitions), unlist(definitions), sep = " = "))) + file_text <- c(file_text, "") + + table_names <- c("experiments", "observations") + for (table_name in table_names) { + if (table_name == "experiments") { + dataset_type <- 'exp' + } else { + dataset_type <- 'obs' + } + file_text <- c(file_text, +"", + paste(rep("#", nchar(table_name) + 11), collapse = ''), + paste0("!!table of ", gsub("_", " ", 
table_name)), + paste(rep("#", nchar(table_name) + 11), collapse = ''), + paste0("#", dataset_type, "_name, var_name[, ", dataset_type, "_main_path[, ", dataset_type, "_file_path[, nc_var_name[, suffix[, var_min[, var_max]]]]]]") + ) + # Some heavy entry processing still to do here, to put asterisks, empty spaces, double quotes, and reduce options + file_text <- c(file_text, unlist(lapply(configuration[[table_name]], function (x) lapply(x, function (y) paste(unlist(y), collapse = ", "))))) + } + + writeLines(file_text, file_conn) + close(file_conn) + } + + invisible(continue) +} diff --git a/R/ConfigShowSimilarEntries.R b/R/ConfigShowSimilarEntries.R new file mode 100644 index 0000000000000000000000000000000000000000..ccad3f3229e2d9480d8a44af5f025b3b7464085a --- /dev/null +++ b/R/ConfigShowSimilarEntries.R @@ -0,0 +1,156 @@ +#'Find Similar Entries In Tables Of Datasets +#' +#'These functions help in finding similar entries in tables of supported +#'datasets by comparing all entries with some given information.\cr +#'This is useful when dealing with complex configuration files and not sure +#'if already support certain variables or datasets.\cr +#'At least one field must be provided in ConfigShowSimilarEntries(). +#'Other fields can be unspecified and won't be taken into account. If more +#'than one field is provided, sameness is avreaged over all provided fields +#'and entries are sorted from higher average to lower. +#' +#'@param configuration Configuration object obtained either from +#' ConfigFileCreate() or ConfigFileOpen(). +#'@param dataset_name Optional dataset name to look for similars of. +#'@param var_name Optional variable name to look for similars of. +#'@param main_path Optional main path to look for similars of. +#'@param file_path Optional file path to look for similars of. +#'@param nc_var_name Optional variable name inside NetCDF file to look for similars of. +#'@param suffix Optional suffix to look for similars of. 
+#'@param varmin Optional variable minimum to look for similars of. +#'@param varmax Optional variable maximum to look for similars of. +#'@param n_results Top 'n_results' alike results will be shown only. Defaults +#' to 10 in ConfigShowSimilarEntries(). +#' +#'@details +#'Sameness is calculated with string distances as specified by Simon White +#'in [1]. +#' +#'@return These functions return information about the found matches. +#' +#'@seealso ConfigApplyMatchingEntries, ConfigEditDefinition, +#' ConfigEditEntry, ConfigFileOpen, ConfigShowSimilarEntries, ConfigShowTable +#'@references +#'[1] Simon White, string sameness: +#' \url{http://www.catalysoft.com/articles/StrikeAMatch.html} +#'@examples +#'# Create an empty configuration file +#'config_file <- paste0(tempdir(), "/example.conf") +#'ConfigFileCreate(config_file, confirm = FALSE) +#'# Open it into a configuration object +#'configuration <- ConfigFileOpen(config_file) +#'# Add an entry at the bottom of 4th level of file-per-startdate experiments +#'# table which will associate the experiment "ExampleExperiment2" and variable +#'# "ExampleVariable" to some information about its location. +#'configuration <- ConfigAddEntry(configuration, "experiments", "last", +#' "ExampleExperiment2", "ExampleVariable", +#' "/path/to/ExampleExperiment2/", +#' "ExampleVariable/ExampleVariable_$START_DATE$.nc") +#'# Edit entry to generalize for any variable. Changing the variable also requires changing the file path, via $VAR_NAME$. 
+#'configuration <- ConfigEditEntry(configuration, "experiments", 1, +#' var_name = "Var.*", +#' file_path = "$VAR_NAME$/$VAR_NAME$_$START_DATE$.nc") +#'# Look for similar entries +#'ConfigShowSimilarEntries(configuration, dataset_name = "Exper", +#' var_name = "Vari") +#' +#'@export +ConfigShowSimilarEntries <- function(configuration, dataset_name = NULL, var_name = NULL, main_path = NULL, file_path = NULL, nc_var_name = NULL, suffix = NULL, varmin = NULL, varmax = NULL, n_results = 10) { + ## Simon White: http://www.catalysoft.com/articles/StrikeAMatch.html + getBigrams <- function(str) { + bigramlst <- list() + for (i in 1:(nchar(str) - 1)) { + bigramlst[[i]] <- substr(str, i, i + 1) + } + return(bigramlst) + } + + strSimilarity <- function(str1, str2) { + str1 <- tolower(str1) + str2 <- tolower(str2) + + if (is.null(str1)) { + str1 <- "" + } else if (is.na(str1)) { + str1 <- "" + } + if (is.null(str2)) { + str2 <- "" + } else if (is.na(str2)) { + str2 <- "" + } + if (nchar(str1) <= 1 && nchar(str2) <= 1) { + return (ifelse(str1 == str2, 1, 0)) + } else if (nchar(str1) == 1) { + return (ifelse(grepl(str1, str2, fixed = TRUE), 1, 0)) + } else if (nchar(str2) == 1) { + return (ifelse(grepl(str2, str1, fixed = TRUE), 1, 0)) + } else if (nchar(str1) == 0 || nchar(str2) == 0) { + return (0) + } else { + pairs1 <- getBigrams(str1) + pairs2 <- getBigrams(str2) + unionlen <- length(pairs1) + length(pairs2) + hit_count <- 0 + for (x in 1:length(pairs1)) { + for(y in 1:length(pairs2)) { + if (pairs1[[x]] == pairs2[[y]]) { + hit_count <- hit_count + 1 + } + } + } + return ((2.0 * hit_count) / unionlen) + } + } + + strSimilarityVec <- Vectorize(strSimilarity, c('str1', 'str2'), USE.NAMES = FALSE) + + all_tables <- c('experiments', 'observations') + all_fields <- c('dataset_name', 'var_name', 'main_path', 'file_path', 'nc_var_name', 'suffix', 'varmin', 'varmax') + selected_fields <- which(unlist(lapply(as.list(match.call())[all_fields], function (x) !is.null(x)))) + values 
<- unlist(as.list(match.call())[all_fields[selected_fields]], use.names = FALSE) + + if (length(selected_fields) < 1) { + stop("There must be at least one selected field ('dataset_name', 'var_name', 'main_path', 'file_path', 'nc_var_name', 'suffix', 'varmin' or 'varmax').") + } + + similarities <- list() + for (table in all_tables) { + similarities[[table]] <- vector("list", 4) + for (level in 1:4) { + if (length(configuration[[table]][[level]]) > 0) { + similarities[[table]][[level]] <- unlist(lapply(configuration[[table]][[level]], function(x) mean(strSimilarityVec(x[selected_fields], values)))) + } + } + } + + n_results <- min(n_results, length(unlist(similarities))) + threshold <- sort(unlist(similarities, use.names = FALSE), decreasing = TRUE)[n_results] + n_minimums <- sum(sort(unlist(similarities, use.names = FALSE), decreasing = TRUE)[1:n_results] == threshold) + + matches <- list() + n_picked_minimums <- 0 + for (table in all_tables) { + matches[[table]] <- list() + line_numbers <- c() + offset <- 0 + for (level in 1:4) { + matches_to_pick <- which(similarities[[table]][[level]] > threshold) + if (n_picked_minimums < n_minimums) { + minimums <- which(similarities[[table]][[level]] == threshold) + if (length(minimums) + n_picked_minimums > n_minimums) { + minimums <- minimums[1:(n_minimums - n_picked_minimums)] + } + matches_to_pick <- c(matches_to_pick, minimums) + n_picked_minimums <- n_picked_minimums + length(minimums) + } + line_numbers <- c(line_numbers, matches_to_pick + offset) + offset <- offset + length(similarities[[table]][[level]]) + matches[[table]][[level]] <- configuration[[table]][[level]][matches_to_pick] + } + dataset_type <- ifelse(grepl('experiments', table), 'experiments', 'observations') + ConfigShowTable(matches, dataset_type, line_numbers) + } + + invisible(matches) +} diff --git a/R/ConfigShowTable.R b/R/ConfigShowTable.R new file mode 100644 index 0000000000000000000000000000000000000000..173a689b15af787d377c353a2da1b4a656b40eb5 
--- /dev/null +++ b/R/ConfigShowTable.R @@ -0,0 +1,75 @@ +#'Show Configuration Tables And Definitions +#' +#'These functions show the tables of supported datasets and definitions in a +#'configuration object obtained via ConfigFileCreate() or ConfigFileOpen(). +#' +#'@param configuration Configuration object obtained from ConfigFileCreate() +#' or ConfigFileOpen(). +#'@param dataset_type In ConfigShowTable(), 'dataset_type' tells whether the +#' table to show is of experimental datasets or of observational datasets. +#' Can take values 'experiments' or 'observations'. +#'@param line_numbers 'line_numbers' is an optional vector of numbers as long +#' as the number of entries in the specified table. Intended for internal use. +#' +#'@seealso [ConfigApplyMatchingEntries()], [ConfigEditDefinition()], +#' [ConfigEditEntry()], [ConfigFileOpen()], [ConfigShowSimilarEntries()], +#' [ConfigShowTable()]. +#'@return These functions return nothing. +#' +#'@examples +#'# Create an empty configuration file +#'config_file <- paste0(tempdir(), "/example.conf") +#'ConfigFileCreate(config_file, confirm = FALSE) +#'# Open it into a configuration object +#'configuration <- ConfigFileOpen(config_file) +#'# Add an entry at the bottom of 4th level of file-per-startdate experiments +#'# table which will associate the experiment "ExampleExperiment2" and variable +#'# "ExampleVariable" to some information about its location. +#'configuration <- ConfigAddEntry(configuration, "experiments", "last", +#' "ExampleExperiment2", "ExampleVariable", +#' "/path/to/ExampleExperiment2/", +#' "ExampleVariable/ExampleVariable_$START_DATE$.nc") +#'# Edit entry to generalize for any variable. Changing variable needs . 
+#'configuration <- ConfigEditEntry(configuration, "experiments", 1, +#' var_name = ".*", +#' file_path = "$VAR_NAME$/$VAR_NAME$_$START_DATE$.nc") +#'# Show tables, lists and definitions +#'ConfigShowTable(configuration, 'experiments') +#'ConfigShowDefinitions(configuration) +#' +#'@rdname ConfigShowTable +#'@export +ConfigShowTable <- function(configuration, dataset_type, line_numbers = NULL) { + table_name <- dataset_type + header <- paste("| Matches in", gsub("_", " ", table_name), "|") + .message(paste(rep("-", nchar(header) - 1), collapse = '')) + .message(header) + .message(paste(rep("-", nchar(header) - 1), collapse = '')) + .message("#dataset_name, var_name[, main_path[, file_path[, nc_var_name[, suffix[, var_min[, var_max]]]]]]") + + if (is.null(line_numbers)) { + line_numbers <- 1:length(unlist(configuration[[table_name]], recursive = FALSE)) + } + line_number <- 1 + + level <- 1 + invisible(lapply(configuration[[table_name]], + function(x) { + .message(paste("# Level", level, "#")) + lapply(x, + function(y) { + cat(paste(line_numbers[line_number], ": ", paste(unlist(y), collapse = ', '), "\n", sep = '')) + line_number <<- line_number + 1 + } + ) + level <<- level + 1 + } + )) +} +#'@rdname ConfigShowTable +#'@export +ConfigShowDefinitions <- function(configuration) { + defaults <- grep("^DEFAULT_", names(configuration$definitions)) + invisible(lapply(as.vector(paste(names(configuration$definitions)[defaults], paste(unlist(configuration$definitions)[defaults], "\n", sep = ''), sep = " = ")), cat)) + invisible(lapply(as.vector(paste(names(configuration$definitions)[-defaults], paste(unlist(configuration$definitions)[-defaults], "\n", sep = ''), sep = " = ")), cat)) +} diff --git a/R/Corr.R b/R/Corr.R index 8c0e581006da3c8eeddb9ff1c090cf4a37a3d6f9..38d3901529d185a92e53b9ec7e944728df6427d9 100644 --- a/R/Corr.R +++ b/R/Corr.R @@ -53,34 +53,16 @@ #' The upper confidence interval. Only present if \code{conf = TRUE}. 
#'} #' -#'@keywords datagen -#'@author History:\cr -#'0.1 - 2011-04 (V. Guemas, \email{vguemas@bsc.es}) - Original code\cr -#'1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Formatting to R CRAN\cr -#'1.1 - 2014-10 (M. Menegoz, \email{martin.menegoz@bsc.es}) - Adding conf.lev argument\cr -#'1.2 - 2015-03 (L.P. Caron, \email{louis-philippe.caron@@bsc.es}) - Adding method argument\cr -#'1.3 - 2017-02 (A. Hunter, \email{alasdair.hunter@bsc.es}) - Adapted to veriApply() -#'3.0 - 2020-01 (A. Ho, \email{an.ho@bsc.es}) - Adapt multiApply feature #'@examples #'# Load sample data as in Load() example: #'example(Load) #'clim <- Clim(sampleData$mod, sampleData$obs) -#'ano_exp <- Ano(sampleData$mod, clim$clim_exp) -#'ano_obs <- Ano(sampleData$obs, clim$clim_obs) -#'runmean_months <- 12 -#'dim_to_smooth <- 4 -#'# Smooth along lead-times -#'smooth_ano_exp <- Smoothing(ano_exp, runmean_months, dim_to_smooth) -#'smooth_ano_obs <- Smoothing(ano_obs, runmean_months, dim_to_smooth) -#'leadtimes_per_startdate <- 60 -#'corr <- Corr(smooth_ano_exp, -#' smooth_ano_obs, -#' comp_dim = 'ftime', #Discard start dates which contain any NA ftime -#' limits = c(ceiling((runmean_months + 1) / 2), -#' leadtimes_per_startdate - floor(runmean_months / 2))) +#'corr <- Corr(clim$clim_exp, clim$clim_obs, time_dim = 'ftime') +#'# Renew the example when Ano and Smoothing is ready #' #'@rdname Corr #'@import multiApply +#'@importFrom ClimProjDiags Subset #'@importFrom stats cor pt qnorm #'@export Corr <- function(exp, obs, time_dim = 'sdate', memb_dim = 'member', @@ -184,7 +166,7 @@ Corr <- function(exp, obs, time_dim = 'sdate', memb_dim = 'member', name_exp <- names(dim(exp)) name_obs <- names(dim(obs)) order_obs <- match(name_exp, name_obs) - obs <- s2dverification:::.aperm2(obs, order_obs) + obs <- Reorder(obs, order_obs) ############################### diff --git a/R/Eno.R b/R/Eno.R index bb27b926789a0293bc7f5dbd06745df53d77afee..9375b78bb1c7f7789ea0a71f24c9b7918374d964 100644 --- 
a/R/Eno.R +++ b/R/Eno.R @@ -18,12 +18,6 @@ #' time_dim dimension, which is removed after the computation. The array #' indicates the number of effective sample along time_dim. #' -#'@keywords datagen -#'@author History:\cr -#'0.1 - 2011-05 (V. Guemas, \email{virginie.guemas@ic3.cat}) - Original code\cr -#'1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to R CRAN -#'3.0 - 2019-12 (A. Ho, \email{an.ho@bsc.es}) - Adopt multiApply feature -#' #'@examples #'set.seed(1) #'data <- array(rnorm(800), dim = c(dataset = 1, member = 2, sdate = 4, diff --git a/R/InsertDim.R b/R/InsertDim.R index 2a28f5c10a917f111627f90c268c68c4854e1d52..1ec5b8f8d2eef659b6eba62e9ae5abb8abf82718 100644 --- a/R/InsertDim.R +++ b/R/InsertDim.R @@ -13,13 +13,6 @@ #' #'@return An array as parameter 'data' but with the added named dimension. #' -#'@keywords datagen -#'@author History:\cr -#'0.1 - 2011-03 (V. Guemas, \email{virginie.guemas@ic3.cat}) - Original code\cr -#'1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to R CRAN\cr -#'1.1 - 2015-03 (N. Manubens, \email{nicolau.manubens@ic3.cat}) - Improvements -#'3.0 - 2019-12 (A. Ho, \email{an.ho@bsc.es}) - Modify with multiApply -#' #'@examples #'a <- array(rnorm(15), dim = c(a = 3, b = 1, c = 5, d = 1)) #'res <- InsertDim(InsertDim(a, posdim = 2, lendim = 1, name = 'e'), 4, c(f = 2)) diff --git a/R/LeapYear.R b/R/LeapYear.R index 89865604465020d1014fb8ecd55c17f2f9231083..87d4e18126bda0d18d511c61377cc8a6d407afbc 100644 --- a/R/LeapYear.R +++ b/R/LeapYear.R @@ -6,10 +6,6 @@ #' #'@return Boolean telling whether the year is a leap year or not. #' -#'@keywords datagen -#'@author History:\cr -#'0.1 - 2011-03 (V. Guemas, \email{vguemas@ic3.cat}) - Original code\cr -#'1.0 - 2013-09 (N. 
Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to CRAN #'@examples #'print(LeapYear(1990)) #'print(LeapYear(1991)) diff --git a/R/Load.R b/R/Load.R index cded9565fe77ae89528bd4c701d171982647c795..b6a3ef62e6230d4a7fa8fe65932ca5a2a913a732 100644 --- a/R/Load.R +++ b/R/Load.R @@ -42,7 +42,7 @@ #'the individual grid of each dataset but can also be averaged after #'interpolating into a common grid. See parameters 'grid' and 'method'.\cr #'Once the two arrays are filled by calling this function, other functions in -#'the s2dverification package that receive as inputs data formatted in this +#'the s2dv package that receive as inputs data formatted in this #'data structure can be executed (e.g: \code{Clim()} to compute climatologies, #'\code{Ano()} to compute anomalies, ...).\cr\cr #'Load() has many additional parameters to disable values and trim dimensions @@ -455,7 +455,7 @@ #' Warning: list() compulsory even if loading 1 experimental dataset only!\cr #' E.g., list(array(1, dim = c(num_lons, num_lats))) #'@param maskobs See help on parameter 'maskmod'. -#'@param configfile Path to the s2dverification configuration file from which +#'@param configfile Path to the s2dv configuration file from which #' to retrieve information on location in file system (and other) of datasets.\cr #' If not specified, the configuration file used at BSC-ES will be used #' (it is included in the package).\cr @@ -670,13 +670,6 @@ #' } #' } #' -#'@keywords datagen -#'@author History:\cr -#'0.1 - 2011-03 (V. Guemas, \email{virginie.guemas@bsc.es}) - Original code\cr -#'1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Formatting to CRAN\cr -#'1.2 - 2015-02 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Generalisation + parallelisation\cr -#'1.3 - 2015-07 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Improvements related to configuration file mechanism\cr -#'1.4 - 2016-01 (N. 
Manubens, \email{nicolau.manubens@bsc.es}) - Added subsetting capabilities #'@examples #'# Let's assume we want to perform verification with data of a variable #'# called 'tos' from a model called 'model' and observed data coming from @@ -762,7 +755,7 @@ #'# Example 1: Providing lists of lists to 'exp' and 'obs': #'# #' \dontrun{ -#'data_path <- system.file('sample_data', package = 's2dverification') +#'data_path <- system.file('sample_data', package = 's2dv') #'exp <- list( #' name = 'experiment', #' path = file.path(data_path, 'model/$EXP_NAME$/monthly_mean', @@ -788,7 +781,7 @@ #'# writing a configuration file). #'# #' \dontrun{ -#'data_path <- system.file('sample_data', package = 's2dverification') +#'data_path <- system.file('sample_data', package = 's2dv') #'expA <- list(name = 'experiment', path = file.path(data_path, #' 'model/$EXP_NAME$/$STORE_FREQ$_mean/$VAR_NAME$_3hourly', #' '$VAR_NAME$_$START_DATE$.nc')) @@ -802,7 +795,7 @@ #' output = 'areave', latmin = 27, latmax = 48, #' lonmin = -12, lonmax = 40) #'# -#'# Example 2: providing character strings in 'exp' and 'obs', and providing +#'# Example 3: providing character strings in 'exp' and 'obs', and providing #'# a configuration file. 
#'# The configuration file 'sample.conf' that we will create in the example #'# has the proper entries to load these (see ?LoadConfigFile for details on @@ -813,7 +806,7 @@ #'c <- ConfigFileOpen(configfile) #'c <- ConfigEditDefinition(c, 'DEFAULT_VAR_MIN', '-1e19', confirm = FALSE) #'c <- ConfigEditDefinition(c, 'DEFAULT_VAR_MAX', '1e19', confirm = FALSE) -#'data_path <- system.file('sample_data', package = 's2dverification') +#'data_path <- system.file('sample_data', package = 's2dv') #'exp_data_path <- paste0(data_path, '/model/$EXP_NAME$/') #'obs_data_path <- paste0(data_path, '/$OBS_NAME$/') #'c <- ConfigAddEntry(c, 'experiments', dataset_name = 'experiment', @@ -832,11 +825,11 @@ #' } #' \dontshow{ #'startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') -#'sampleData <- s2dverification:::.LoadSampleData('tos', c('experiment'), -#' c('observation'), startDates, -#' output = 'areave', -#' latmin = 27, latmax = 48, -#' lonmin = -12, lonmax = 40) +#'sampleData <- s2dv:::.LoadSampleData('tos', c('experiment'), +#' c('observation'), startDates, +#' output = 'areave', +#' latmin = 27, latmax = 48, +#' lonmin = -12, lonmax = 40) #' } #'@import parallel bigmemory methods #'@importFrom stats ts window na.omit @@ -1170,9 +1163,9 @@ Load <- function(var, exp = NULL, obs = NULL, sdates, nmember = NULL, # configfile if (is.null(configfile)) { - configfile <- system.file("config", "BSC.conf", package = "s2dverification") + configfile <- system.file("config", "BSC.conf", package = "s2dv") } else if (!is.character(configfile) || !(nchar(configfile) > 0)) { - stop("Error: parameter 'configfile' must be a character string with the path to an s2dverification configuration file, if specified.") + stop("Error: parameter 'configfile' must be a character string with the path to an s2dv configuration file, if specified.") } # varmin diff --git a/R/MeanDims.R b/R/MeanDims.R index aea09c51ca6d72493c753f2ff46afb264c1f9e6c..ea16c96ab31bc841c64e8898c96062c9c3047b65 
100644 --- a/R/MeanDims.R +++ b/R/MeanDims.R @@ -13,12 +13,6 @@ #' dimensions. #' removed. #' -#'@keywords datagen -#'@author History:\cr -#'0.1 - 2011-04 (V. Guemas, \email{vguemas@@ic3.cat}) - Original code\cr -#'1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@@ic3.cat}) - Formatting to R CRAN\cr -#'1.1 - 2015-03 (N. Manubens, \email{nicolau.manubens@@ic3.cat}) - Improved memory usage -#'3.0 - 2020-01 (A. Ho, \email{an.ho@bsc.es}) - Preserve dimension names #'@examples #'a <- array(rnorm(24), dim = c(a = 2, b= 3, c = 4)) #'print(dim(MeanDims(a, 2))) diff --git a/R/Plot2VarsVsLTime.R b/R/Plot2VarsVsLTime.R deleted file mode 100644 index 1e2fc6d9081b33a4ecae72395e6b052ab4c535f0..0000000000000000000000000000000000000000 --- a/R/Plot2VarsVsLTime.R +++ /dev/null @@ -1,258 +0,0 @@ -#'Plot Two Scores With Confidence Intervals In A Common Plot -#' -#'Plots two input variables having the same dimensions in a common plot.\cr -#'One plot for all experiments.\cr -#'Input variables should have dimensions (nexp/nmod, nltime). -#' -#'@param var1 Matrix of dimensions (nexp/nmod, nltime). -#'@param var2 Matrix of dimensions (nexp/nmod, nltime). -#'@param toptitle Main title, optional. -#'@param ytitle Title of Y-axis, optional. -#'@param monini Starting month between 1 and 12. Default = 1. -#'@param freq 1 = yearly, 12 = monthly, 4 = seasonal, ... Default = 12. -#'@param nticks Number of ticks and labels on the x-axis, optional. -#'@param limits c(lower limit, upper limit): limits of the Y-axis, optional. -#'@param listexp List of experiment names, up to three, optional. -#'@param listvars List of names of input variables, optional. -#'@param biglab TRUE/FALSE for presentation/paper plot. Default = FALSE. -#'@param hlines c(a, b, ...) Add horizontal black lines at Y-positions a, b, -#' ...\cr -#' Default: NULL. -#'@param leg TRUE/FALSE if legend should be added or not to the plot. -#' Default = TRUE. 
-#'@param siglev TRUE/FALSE if significance level should replace confidence -#' interval.\cr -#' Default = FALSE. -#'@param sizetit Multiplicative factor to change title size, optional. -#'@param show_conf TRUE/FALSE to show/not confidence intervals for input -#' variables. -#'@param fileout Name of output file. Extensions allowed: eps/ps, jpeg, png, -#' pdf, bmp and tiff. \cr -#' Default = 'output_plot2varsvsltime.eps' -#'@param width File width, in the units specified in the parameter size_units -#' (inches by default). Takes 8 by default. -#'@param height File height, in the units specified in the parameter -#' size_units (inches by default). Takes 5 by default. -#'@param size_units Units of the size of the device (file or window) to plot -#' in. Inches ('in') by default. See ?Devices and the creator function of the -#' corresponding device. -#'@param res Resolution of the device (file or window) to plot in. See -#' ?Devices and the creator function of the corresponding device. -#'@param ... Arguments to be passed to the method. Only accepts the following -#' graphical parameters:\cr -#' adj ann ask bg bty cex.sub cin col.axis col.lab col.main col.sub cra crt -#' csi cxy err family fg fig font font.axis font.lab font.main font.sub lend -#' lheight ljoin lmitre mar mex mfcol mfrow mfg mkh oma omd omi page pch plt -#' smo srt tck tcl usr xaxp xaxs xaxt xlog xpd yaxp yaxs yaxt ylbias ylog \cr -#' For more information about the parameters see `par`. -#' -#'@details -#'Examples of input:\cr -#'------------------\cr\cr -#'RMSE error for a number of experiments and along lead-time: (nexp, nltime) -#' -#'@keywords dynamic -#'@author History:\cr -#'1.0 - 2013-03 (I. 
Andreu-Burillo, \email{isabel.andreu-burillo@@ic3.cat}) -#' - Original code -#'@examples -#'# Load sample data as in Load() example: -#'example(Load) -#'clim <- Clim(sampleData$mod, sampleData$obs) -#'ano_exp <- Ano(sampleData$mod, clim$clim_exp) -#'ano_obs <- Ano(sampleData$obs, clim$clim_obs) -#'runmean_months <- 12 -#'dim_to_smooth <- 4 # Smooth along lead-times -#'smooth_ano_exp <- Smoothing(ano_exp, runmean_months, dim_to_smooth) -#'smooth_ano_obs <- Smoothing(ano_obs, runmean_months, dim_to_smooth) -#'dim_to_mean <- 2 # Mean along members -#'required_complete_row <- 3 # Discard start dates that contain NA along lead-times -#'leadtimes_per_startdate <- 60 -#'rms <- RMS(Mean1Dim(smooth_ano_exp, dim_to_mean), -#' Mean1Dim(smooth_ano_obs, dim_to_mean), -#' compROW = required_complete_row, -#' limits = c(ceiling((runmean_months + 1) / 2), -#' leadtimes_per_startdate - floor(runmean_months / 2))) -#'smooth_ano_exp_m_sub <- smooth_ano_exp - InsertDim(Mean1Dim(smooth_ano_exp, 2, -#' narm = TRUE), 2, dim(smooth_ano_exp)[2]) -#'spread <- Spread(smooth_ano_exp_m_sub, c(2, 3)) -#' \donttest{ -#'Plot2VarsVsLTime(InsertDim(rms[, , , ], 1, 1), spread$sd, -#' toptitle = 'RMSE and spread', monini = 11, freq = 12, -#' listexp = c('CMIP5 IC3'), listvar = c('RMSE', 'spread'), -#' fileout = 'plot2vars.eps') -#' } -#' -#'@importFrom grDevices png jpeg postscript pdf svg bmp tiff postscript dev.cur dev.new dev.off -#'@importFrom stats ts -#'@export -Plot2VarsVsLTime <- function(var1, var2, toptitle = '', ytitle = '', monini = 1, - freq = 12, nticks = NULL, limits = NULL, listexp = - c('exp1', 'exp2', 'exp3'), listvars = c('var1', - 'var2'), biglab = FALSE, hlines = NULL, leg = TRUE, - siglev = FALSE, sizetit = 1, show_conf = TRUE, - fileout = 'output_plot2varsvsltime.eps', - width = 8, height = 5, size_units = 'in', res = 100, ...) 
{ - # Process the user graphical parameters that may be passed in the call - ## Graphical parameters to exclude - excludedArgs <- c("cex", "cex.axis", "cex.lab", "cex.main", "col", "fin", "lab", "las", "lty", "lwd", "mai", "mgp", "new", "pin", "ps", "pty") - userArgs <- .FilterUserGraphicArgs(excludedArgs, ...) - - # If there is any filenames to store the graphics, process them - # to select the right device - if (!is.null(fileout)) { - deviceInfo <- .SelectDevice(fileout = fileout, width = width, height = height, units = size_units, res = res) - saveToFile <- deviceInfo$fun - fileout <- deviceInfo$files - } - - # - nvars <- 2 - - if (length(dim(var1)) != length(dim(var2))) { - print("the two input variables should have the same dimensions") - stop() - } - if (length(dim(var1)) >= 4) { - print("dimensions of input variables should be 3") - stop() - } - nleadtime <- dim(var1)[3] - nexp <- dim(var1)[1] - var <- array(dim = c(nvars, nexp, 3, nleadtime)) - for (jvar in 1:nvars) { - varname <- paste("var", as.character(jvar), sep = "") - var[jvar, , , ] <- get(varname) - rm(varname) - } - - if (is.null(limits) == TRUE) { - ll <- min(var1, na.rm = TRUE) - ul <- max(var1, na.rm = TRUE) - if (biglab) { - ul <- ul + 0.4 * (ul - ll) - } else { - ul <- ul + 0.3 * (ul - ll) - } - } else { - ll <- limits[1] - ul <- limits[2] - } - lastyear <- (monini + (nleadtime - 1) * 12 / freq - 1) %/% 12 - lastmonth <- (monini + (nleadtime - 1) * 12 / freq - 1) %% 12 + 1 - empty_ts <- ts(start = c(0000, (monini - 1) %/% (12 / freq) + 1), - end = c(lastyear, (lastmonth - 1) %/% (12 / freq) + 1), - frequency = freq) - empty <- array(dim = length(empty_ts)) - # - # Define some plot parameters - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # - if (is.null(nticks)) { - if (biglab) { - nticks <- 5 - } else { - nticks <- 10 - } - } - labind <- seq(1, nleadtime, max(nleadtime %/% nticks, 1)) - months <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", - "Oct", "Nov", "Dec") - labyear <- 
((labind - 1) * 12 / freq + monini - 1) %/% 12 - labmonth <- months[((labind - 1) * 12 / freq + monini - 1) %% 12 + 1] - for (jx in 1:length(labmonth)) { - y2o3dig <- paste("0", as.character(labyear[jx]), sep = "") - labmonth[jx] <- paste(labmonth[jx], "\nYr ", substr(y2o3dig, nchar(y2o3dig) - - 1, nchar(y2o3dig)), sep = "") - } - color <- c("red1", "dodgerblue1", "green1", "orange1", "lightblue1", - "deeppink1", "mediumpurple1", "lightgoldenrod1", "olivedrab1", - "mediumorchid1") - type <- c(1, 3) - if (siglev == TRUE) { - lines <- c("n", "l", "n") - } - else{ - lines <- c("l", "l", "l") - } - thickness <- array(dim = c(3)) - thickness[1] <- c(1) - thickness[2] <- c(8) - thickness[3] <- thickness[1] - - # - # Define plot layout - # ~~~~~~~~~~~~~~~~~~~~ - # - - # Open connection to graphical device - if (!is.null(fileout)) { - saveToFile(fileout) - } else if (names(dev.cur()) == 'null device') { - dev.new(units = size_units, res = res, width = width, height = height) - } - - # Load the user parameters - par(userArgs) - - if (biglab) { - par(mai = c(1.25, 1.4, 0.5, 1), mgp = c(4, 2.5, 0)) - par(cex = 1.3, cex.lab = 2, cex.axis = 1.8) - cexmain <- 2.2 - legsize <- 1.5 - } else { - par(mai = c(1, 1.1, 0.5, 0), mgp = c(3, 1.8, 0)) - par(cex = 1.3, cex.lab = 1.5, cex.axis = 1.1) - cexmain <- 1.5 - legsize <- 1 - } - plot(empty, ylim = c(ll, ul), xlab = "Time (months)", ylab = ytitle, - main = toptitle, cex.main = cexmain * sizetit, axes = FALSE) - axis(1, at = labind, labels = labmonth) - axis(2) - box() - if (is.null(hlines) != TRUE) { - for (jy in 1:length(hlines)) { - par(new = TRUE) - abline(h = hlines[jy]) - } - } - # - # Loop on experimental data - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # - legendnames <- array(dim = nexp * nvars) - legendthick <- array(dim = nexp * nvars) - legendsty <- array(dim = nexp * nvars) - legendcol <- array(dim = nexp * nvars) - if (show_conf == TRUE) { - start_line <- 3 - end_line <- 1 - } else { - start_line <- 2 - end_line <- 2 - } - for 
(jint in seq(start_line, end_line, -1)) { - ind <- 1 - for (jexp in 1:nexp) { - for (jvar in 1:nvars) { - par(new = TRUE) - plot(var[jvar, jexp, jint, ], type = lines[jint], ylim = c(ll, ul), - col = color[jexp], lty = type[jvar], lwd = thickness[jint], - ylab = "", xlab = "", axes = FALSE) - legendnames[ind] <- paste(listexp[jexp], listvars[jvar]) - legendthick[ind] <- 2 - legendsty[ind] <- type[jvar] - legendcol[ind] <- color[jexp] - ind <- ind + 1 - } - } - } - if (leg) { - legend(1, ul, legendnames, lty = legendsty, lwd = legendthick, - col = legendcol, cex = legsize) - } - - # If the graphic was saved to file, close the connection with the device - if(!is.null(fileout)) dev.off() -} diff --git a/R/PlotACC.R b/R/PlotACC.R deleted file mode 100644 index 872327f15830093f9de97fd9bac33d15008edd6b..0000000000000000000000000000000000000000 --- a/R/PlotACC.R +++ /dev/null @@ -1,251 +0,0 @@ -#'Plot Plumes/Timeseries Of Anomaly Correlation Coefficients -#' -#'Plots plumes/timeseries of ACC from an array with dimensions -#'(output from \code{ACC()}): \cr -#'c(nexp, nobs, nsdates, nltime, 4)\cr -#'where the fourth dimension is of length 4 and contains the lower limit of -#'the 95\% confidence interval, the ACC, the upper limit of the 95\% -#'confidence interval and the 95\% significance level given by a one-sided -#'T-test. -#' -#'@param ACC ACC matrix with with dimensions:\cr -#' c(nexp, nobs, nsdates, nltime, 4)\cr -#' with the fourth dimension of length 4 containing the lower limit of the -#' 95\% confidence interval, the ACC, the upper limit of the 95\% confidence -#' interval and the 95\% significance level. -#'@param sdates List of startdates: c('YYYYMMDD','YYYYMMDD'). -#'@param toptitle Main title, optional. -#'@param sizetit Multiplicative factor to scale title size, optional. -#'@param ytitle Title of Y-axis for each experiment: c('',''), optional. -#'@param limits c(lower limit, upper limit): limits of the Y-axis, optional. 
-#'@param legends List of flags (characters) to be written in the legend, -#' optional. -#'@param freq 1 = yearly, 12 = monthly, 4 = seasonal, ... Default: 12. -#'@param biglab TRUE/FALSE for presentation/paper plot, Default = FALSE. -#'@param fill TRUE/FALSE if filled confidence interval. Default = FALSE. -#'@param linezero TRUE/FALSE if a line at y=0 should be added. Default = FALSE. -#'@param points TRUE/FALSE if points instead of lines. Default = TRUE.\cr -#' Must be TRUE if only 1 leadtime. -#'@param vlines List of x location where to add vertical black lines, optional. -#'@param fileout Name of output file. Extensions allowed: eps/ps, jpeg, png, -#' pdf, bmp and tiff. \cr -#' Default = 'output_PlotACC.eps' -#'@param width File width, in the units specified in the parameter size_units -#' (inches by default). Takes 8 by default. -#'@param height File height, in the units specified in the parameter -#' size_units (inches by default). Takes 5 by default. -#'@param size_units Units of the size of the device (file or window) to plot -#' in. Inches ('in') by default. See ?Devices and the creator function of the -#' corresponding device. -#'@param res Resolution of the device (file or window) to plot in. See -#' ?Devices and the creator function of the corresponding device. -#'@param \dots Arguments to be passed to the method. Only accepts the following -#' graphical parameters:\cr -#' adj ann ask bg bty cex.sub cin col.axis col.lab col.main col.sub cra crt -#' csi cxy err family fg fig fin font font.axis font.lab font.main font.sub -#' lend lheight ljoin lmitre mar mex mfcol mfrow mfg mkh oma omd omi page -#' plt smo srt tck tcl usr xaxp xaxs xaxt xlog xpd yaxp yaxs yaxt ylbias ylog\cr -#' For more information about the parameters see `par`. -#' -#'@keywords dynamic -#'@author History:\cr -#'0.1 - 2013-08 (V. Guemas, \email{virginie.guemas@@ic3.cat}) - Original code\cr -#'1.0 - 2013-09 (N. 
Manubens, \email{nicolau.manubens@@ic3.cat}) - Formatting to CRAN -#'@examples -#'# See examples on Load() to understand the first lines in this example -#' \dontrun{ -#'data_path <- system.file('sample_data', package = 's2dverification') -#'expA <- list(name = 'experiment', path = file.path(data_path, -#' 'model/$EXP_NAME$/$STORE_FREQ$_mean/$VAR_NAME$_3hourly', -#' '$VAR_NAME$_$START_DATE$.nc')) -#'obsX <- list(name = 'observation', path = file.path(data_path, -#' '$OBS_NAME$/$STORE_FREQ$_mean/$VAR_NAME$', -#' '$VAR_NAME$_$YEAR$$MONTH$.nc')) -#' -#'# Now we are ready to use Load(). -#'startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') -#'sampleData <- Load('tos', list(expA), list(obsX), startDates, -#' leadtimemin = 1, leadtimemax = 4, output = 'lonlat', -#' latmin = 27, latmax = 48, lonmin = -12, lonmax = 40) -#' } -#' \dontshow{ -#'startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') -#'sampleData <- s2dverification:::.LoadSampleData('tos', c('experiment'), -#' c('observation'), startDates, -#' leadtimemin = 1, -#' leadtimemax = 4, -#' output = 'lonlat', -#' latmin = 27, latmax = 48, -#' lonmin = -12, lonmax = 40) -#' } -#'sampleData$mod <- Season(sampleData$mod, 4, 11, 12, 2) -#'sampleData$obs <- Season(sampleData$obs, 4, 11, 12, 2) -#'clim <- Clim(sampleData$mod, sampleData$obs) -#'ano_exp <- Ano(sampleData$mod, clim$clim_exp) -#'ano_obs <- Ano(sampleData$obs, clim$clim_obs) -#'acc <- ACC(Mean1Dim(sampleData$mod, 2), -#' Mean1Dim(sampleData$obs, 2)) -#' \donttest{ -#'PlotACC(acc$ACC, startDates, toptitle = "Anomaly Correlation Coefficient") -#' -#' } -#'@importFrom grDevices dev.cur dev.new dev.off -#'@importFrom stats ts -#'@export -PlotACC <- function(ACC, sdates, toptitle = "", sizetit = 1, ytitle = "", - limits = NULL, legends = NULL, freq = 12, biglab = FALSE, - fill = FALSE, linezero = FALSE, points = TRUE, vlines = NULL, - fileout = "output_PlotACC.eps", - width = 8, height = 5, size_units = 'in', res = 
100, ...) { - # Process the user graphical parameters that may be passed in the call - ## Graphical parameters to exclude - excludedArgs <- c("cex", "cex.axis", "cex.lab", "cex.main", "col", "lab", "las", "lty", "lwd", "mai", "mgp", "new", "pch", "pin", "ps", "pty") - userArgs <- .FilterUserGraphicArgs(excludedArgs, ...) - - # If there is any filenames to store the graphics, process them - # to select the right device - if (!is.null(fileout)) { - deviceInfo <- .SelectDevice(fileout = fileout, width = width, height = height, units = size_units, res = res) - saveToFile <- deviceInfo$fun - fileout <- deviceInfo$files - } - - # - if (length(dim(ACC)) != 5 | dim(ACC)[5] != 4) { - stop("5 dim needed : c(nexp, nobs, nsdates, nltime, 4)") - } - nexp <- dim(ACC)[1] - nobs <- dim(ACC)[2] - nleadtime <- dim(ACC)[4] - nsdates <- dim(ACC)[3] - if (is.null(limits) == TRUE) { - ll <- min(ACC, na.rm = TRUE) - ul <- max(ACC, na.rm = TRUE) - if (biglab) { - ul <- ul + 0.3 * (ul - ll) - } else { - ul <- ul + 0.2 * (ul - ll) - } - } else { - ll <- limits[1] - ul <- limits[2] - } - yearinit <- as.integer(substr(sdates[1], 1, 4)) - moninit <- as.integer(substr(sdates[1], 5, 6)) - lastyear <- as.integer(substr(sdates[nsdates], 1, 4)) + (moninit + ( - nleadtime - 1) * 12 / freq - 1) %/% 12 - lastmonth <- (moninit + (nleadtime - 1) * (12 / freq) - 1) %% 12 + 1 - empty_ts <- ts(start = c(yearinit, (moninit - 1) %/% (12 / freq) + 1), - end = c(lastyear, (lastmonth - 1) %/% (12 / freq) + 1), - frequency = freq) - color <- c("red4", "dodgerblue4", "lightgoldenrod4", "deeppink4", - "mediumpurple4", "green4", "orange4", "lightblue4", "mediumorchid4", - "olivedrab4") - colorblock <- c("red1", "dodgerblue1", "lightgoldenrod1", "deeppink1", - "mediumpurple1", "green1", "orange1", "lightblue1", - "mediumorchid1", "olivedrab1") - - # Open connection to graphical device - if (!is.null(fileout)) { - saveToFile(fileout) - } else if (names(dev.cur()) == 'null device') { - dev.new(units = size_units, res 
= res, width = width, height = height) - } - - # Load the user parameters - par(userArgs) - - if (biglab) { - par(mai = c(1, 1.1, 0.5, 0), mgp = c(2.8, 0.9, 0)) - par(cex = 1.3, cex.lab = 2, cex.axis = 1.8) - cexmain <- 2.2 - legsize <- 1.5 - } else { - par(mai = c(0.8, 0.8, 0.5, 0.1), mgp = c(2, 0.5, 0)) - par(cex = 1.3, cex.lab = 1.5, cex.axis = 1.1) - cexmain <- 1.5 - legsize <- 1 - } - plot(empty_ts, ylim = c(ll, ul), xlab = "Time (years)", ylab = ytitle, - main = toptitle, cex.main = cexmain * sizetit) - for (jexp in 1:nexp) { - for (jobs in 1:nobs) { - numcol <- jobs + (jexp - 1) * nobs - for (jdate in 1:nsdates) { - year0 <- as.integer(substr(sdates[jdate], 1, 4)) - mon0 <- as.integer(substr(sdates[jdate], 5, 6)) - start <- (year0 - yearinit) * freq + 1 - end <- start + nleadtime - 1 - var <- array(dim = c(3, length(empty_ts))) - var[, start:end] <- t(ACC[jexp, jobs, jdate, , 1:3]) - if (fill) { - par(new = TRUE) - bordup <- ACC[jexp, jobs, jdate, , 3] - borddown <- ACC[jexp, jobs, jdate, , 1] - tmp <- c(start:end) - xout <- is.na(bordup + borddown) - tmp <- tmp[which(xout == FALSE)] - xx <- c(tmp, rev(tmp)) - bordup <- bordup[which(xout == FALSE)] - borddown <- borddown[which(xout == FALSE)] - yy <- c(bordup, rev(borddown)) - if (jdate == 1) { - matplot(t(var), type = "l", lty = 1, lwd = 1, ylim = c(ll, ul), - col = color[numcol], xlab = "", ylab = "", axes = FALSE) - } - polygon(xx, yy, col = colorblock[numcol], border = NA) - } - if (points) { - par(new = TRUE) - plot(var[2, ], type = "p", lty = 1, lwd = 6, ylim = c(ll, ul), - col = color[numcol], xlab = "", ylab = "", axes = FALSE, - cex = 0.6) - par(new = TRUE) - plot(var[1, ], type = "p", pch = 6, lwd = 3, ylim = c(ll, ul), - col = color[numcol], xlab = "", ylab = "", axes = FALSE, - cex = 0.6) - par(new = TRUE) - plot(var[3, ], type = "p", pch = 2, lwd = 3, ylim = c(ll, ul), - col = color[numcol], xlab = "", ylab = "", axes = FALSE, - cex = 0.6) - par(new = TRUE) - for (jind in start:end) { - 
lines(c(jind, jind), var[c(1, 3), jind], lwd = 1, - ylim = c(ll, ul), col = color[numcol], xlab = "", - ylab = "", axes = FALSE) - } - } else { - par(new = TRUE) - plot(var[2, ], type = "l", lty = 1, lwd = 4, ylim = c(ll, ul), - col = color[numcol], xlab = "", ylab = "", axes = FALSE) - par(new = TRUE) - plot(var[1, ], type = "l", lty = 1, lwd = 1, ylim = c(ll, ul), - col = color[numcol], xlab = "", ylab = "", axes = FALSE) - par(new = TRUE) - plot(var[3, ], type = "l", lty = 1, lwd = 1, ylim = c(ll, ul), - col = color[numcol], xlab = "", ylab = "", axes = FALSE) - } - } - } - } - if (linezero) { - abline(h = 0, col = "black") - } - if (is.null(vlines) == FALSE) { - for (x in vlines) { - abline(v = x, col = "black") - } - } - if (is.null(legends) == FALSE) { - if (points) { - legend(0, ul, legends[1:(nobs * nexp)], lty = 3, lwd = 10, - col = color[1:(nobs * nexp)], cex = legsize) - } else { - legend(0, ul, legends[1:(nobs * nexp)], lty = 1, lwd = 4, - col = color[1:(nobs * nexp)], cex = legsize) - } - } - - # If the graphic was saved to file, close the connection with the device - if(!is.null(fileout)) dev.off() -} diff --git a/R/PlotAno.R b/R/PlotAno.R deleted file mode 100644 index 922806aa227ec5ab9993a87212f110cb718f9561..0000000000000000000000000000000000000000 --- a/R/PlotAno.R +++ /dev/null @@ -1,304 +0,0 @@ -#'Plot Raw Or Smoothed Anomalies -#' -#'Plots timeseries of raw or smoothed anomalies of any variable output from -#'\code{Load()} or \code{Ano()} or or \code{Ano_CrossValid()} or -#'\code{Smoothing()}. -#' -#'@param exp_ano Array containing the experimental data:\cr -#' c(nmod/nexp, nmemb/nparam, nsdates, nltime). -#'@param obs_ano Optional matrix containing the observational data:\cr -#' c(nobs, nmemb, nsdates, nltime) -#'@param sdates List of starting dates: c('YYYYMMDD','YYYYMMDD'). -#'@param toptitle Main title for each experiment: c('',''), optional. -#'@param ytitle Title of Y-axis for each experiment: c('',''), optional. 
-#'@param limits c(lower limit, upper limit): limits of the Y-axis, optional. -#'@param legends List of observational dataset names, optional. -#'@param freq 1 = yearly, 12 = monthly, 4 = seasonal, ... Default: 12. -#'@param biglab TRUE/FALSE for presentation/paper plot. Default = FALSE. -#'@param fill TRUE/FALSE if the spread between members should be filled. -#' Default = TRUE. -#'@param memb TRUE/FALSE if all members/only the ensemble-mean should be -#' plotted.\cr -#' Default = TRUE. -#'@param ensmean TRUE/FALSE if the ensemble-mean should be plotted. -#' Default = TRUE. -#'@param linezero TRUE/FALSE if a line at y=0 should be added. -#' Default = FALSE. -#'@param points TRUE/FALSE if points instead of lines should be shown. -#' Default = FALSE. -#'@param vlines List of x location where to add vertical black lines, optional. -#'@param sizetit Multiplicative factor to scale title size, optional. -#'@param fileout Name of the output file for each experiment: c('',''). -#' Extensions allowed: eps/ps, jpeg, png, pdf, bmp and tiff. If filenames -#' with different extensions are passed, it will be considered only the first -#' one and it will be extended to the rest. \cr -#' Default = c('output1_plotano.eps', 'output2_plotano.eps', -#' 'output3_plotano.eps', 'output4_plotano.eps', -#' 'output5_plotano.eps') -#'@param width File width, in the units specified in the parameter size_units -#' (inches by default). Takes 8 by default. -#'@param height File height, in the units specified in the parameter -#' size_units (inches by default). Takes 5 by default. -#'@param size_units Units of the size of the device (file or window) to plot -#' in. Inches ('in') by default. See ?Devices and the creator function of the -#' corresponding device. -#'@param res Resolution of the device (file or window) to plot in. See -#' ?Devices and the creator function of the corresponding device. -#'@param \dots Arguments to be passed to the method. 
Only accepts the following -#' graphical parameters:\cr -#' adj ann ask bg bty cex.sub cin col.axis col.lab col.main col.sub cra crt -#' csi cxy err family fg fig font font.axis font.lab font.main font.sub lend -#' lheight ljoin lmitre mar mex mfcol mfrow mfg mkh oma omd omi page plt smo -#' srt tck tcl usr xaxp xaxs xaxt xlog xpd yaxp yaxs yaxt ylbias ylog \cr -#' For more information about the parameters see `par`. -#' -#'@keywords dynamic -#'@author History:\cr -#'0.1 - 2011-03 (V. Guemas, \email{virginie.guemas@@ic3.cat}) - Original code\cr -#'1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@@ic3.cat}) - Formatting to CRAN -#'@examples -#'# Load sample data as in Load() example: -#'example(Load) -#'clim <- Clim(sampleData$mod, sampleData$obs) -#'ano_exp <- Ano(sampleData$mod, clim$clim_exp) -#'ano_obs <- Ano(sampleData$obs, clim$clim_obs) -#'runmean_nb_months <- 12 -#'dim_to_smooth <- 4 # Smooth along lead-times -#'smooth_ano_exp <- Smoothing(ano_exp, runmean_nb_months, dim_to_smooth) -#'smooth_ano_obs <- Smoothing(ano_obs, runmean_nb_months, dim_to_smooth) -#' \donttest{ -#'PlotAno(smooth_ano_exp, smooth_ano_obs, startDates, -#' toptitle = paste('smoothed anomalies'), ytitle = c('K', 'K', 'K'), -#' legends = 'ERSST', biglab = FALSE, fileout = 'tos_ano.eps') -#' } -#' -#'@importFrom grDevices dev.cur dev.new dev.off -#'@importFrom stats ts -#'@export -PlotAno <- function(exp_ano, obs_ano = NULL, sdates, toptitle = rep('', 15), - ytitle = rep('', 15), limits = NULL, legends = NULL, - freq = 12, biglab = FALSE, fill = TRUE, memb = TRUE, - ensmean = TRUE, linezero = FALSE, points = FALSE, - vlines = NULL, sizetit = 1, - fileout = paste0('output', 1:5, '_plotano.eps'), - width = 8, height = 5, size_units = 'in', res = 100, ...) 
{ - # Process the user graphical parameters that may be passed in the call - ## Graphical parameters to exclude - excludedArgs <- c("cex", "cex.axis", "cex.lab", "cex.main", "col", "fin", "lab", "las", "lty", "lwd", "mai", "mgp", "new", "pch", "pin", "ps", "pty") - userArgs <- .FilterUserGraphicArgs(excludedArgs, ...) - - # If there is any filenames to store the graphics, process them - # to select the right device - if (!is.null(fileout)) { - deviceInfo <- .SelectDevice(fileout = fileout, width = width, height = height, units = size_units, res = res) - saveToFile <- deviceInfo$fun - fileout <- deviceInfo$files - } - - # - # Get some arguments - # ~~~~~~~~~~~~~~~~~~~~ - # - if (length(dim(exp_ano)) != 4 ) { - stop("4 dim needed : c(nexp/nobs, nmemb, nsdates, nltime)") - } - nexp <- dim(exp_ano)[1] - nmemb <- dim(exp_ano)[2] - nleadtime <- dim(exp_ano)[4] - nsdates <- dim(exp_ano)[3] - if (is.null(obs_ano) == FALSE) { - nobs <- dim(obs_ano)[1] - if (length(dim(obs_ano)) != 4 ) { - stop("4 dim needed : c(nexp/nobs, nmemb, nsdates, nltime)") - } - if (dim(obs_ano)[3] != nsdates | dim(obs_ano)[4] != nleadtime ) { - stop("obs and exp must have same number of sdates & ltimes") - } - } else { - nobs <- 0 - } - if (is.null(limits) == TRUE) { - if (memb) { - ll <- min(min(exp_ano, na.rm = TRUE), min(obs_ano, na.rm = TRUE), na.rm = TRUE) - ul <- max(max(exp_ano, na.rm = TRUE), max(obs_ano, na.rm = TRUE), na.rm = TRUE) - } - else{ - ll <- min(min(Mean1Dim(exp_ano, 2), na.rm = TRUE), min(obs_ano, na.rm = TRUE), - na.rm = TRUE) - ul <- max(max(Mean1Dim(exp_ano, 2), na.rm = TRUE), max(obs_ano, na.rm = TRUE), - na.rm = TRUE) - } - if (nobs > 0) { - if (biglab) { - ul <- ul + 0.3 * (ul - ll) - } else { - ul <- ul + 0.2 * (ul - ll) - } - } - } else { - ll <- limits[1] - ul <- limits[2] - } - # - # Define some plot parameters - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # - yearinit <- as.integer(substr(sdates[1], 1, 4)) - moninit <- as.integer(substr(sdates[1], 5, 6)) - lastyear <- 
as.integer(substr(sdates[nsdates], 1, 4)) + (moninit + ( - nleadtime - 1) * 12 / freq - 1) %/% 12 - lastmonth <- (moninit + (nleadtime - 1) * (12 / freq) - 1) %% 12 + 1 - empty_ts <- ts(start = c(yearinit, (moninit - 1) %/% (12 / freq) + 1), - end = c(lastyear, (lastmonth - 1) %/% (12 / freq) + 1), - frequency = freq) - color <- c("red4", "orange4", "lightgoldenrod4", "olivedrab4", "green4", - "lightblue4", "dodgerblue4", "mediumpurple4", "mediumorchid4", - "deeppink4") - color <- c(color, color, color, color, color, color, color, color, color, - color, color) - colorblock <- c("red1", "orange1", "lightgoldenrod1", "olivedrab1", "green1", - "lightblue1", "dodgerblue1", "mediumpurple1", "mediumorchid1", - "deeppink1") - colorblock <- c(colorblock, colorblock, colorblock, colorblock, colorblock, - colorblock, colorblock, colorblock, colorblock, colorblock) - type <- c(1, 3, 2, 4) - thickness <- c(1, 3, 2, 2) - # - # Loop on the experiments : one plot for each - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # - for (jexp in 1:nexp) { - # - # Define plot layout - # ~~~~~~~~~~~~~~~~~~~~ - # - - # Open connection to graphical device - if (!is.null(fileout)) { - saveToFile(fileout[jexp]) - } else if (names(dev.cur()) == 'null device') { - dev.new(units = size_units, res = res, width = width, height = height) - } - - - # Load the user parameters - par(userArgs) - - if (biglab) { - par(mai = c(1, 1.1, 0.5, 0), mgp = c(2.8, 0.9, 0)) - par(cex = 1.3, cex.lab = 2, cex.axis = 1.8) - cexmain <- 2.2 - legsize <- 1.5 - } else { - par(mai = c(0.8, 0.8, 0.5, 0.3), mgp = c(2, 0.5, 0)) - par(cex = 1.3, cex.lab = 1.5, cex.axis = 1.1) - cexmain <- 1.5 - legsize <- 1 - } - plot(empty_ts, ylim = c(ll, ul), xlab = "Time (years)", ylab = ytitle[jexp], - main = toptitle[jexp], cex.main = cexmain * sizetit) - # - # Plot experimental data + all observational datasets sdate by sdate - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # - for (jdate in 1:nsdates) { - 
year0 <- as.integer(substr(sdates[jdate], 1, 4)) - mon0 <- as.integer(substr(sdates[jdate], 5, 6)) - start <- (year0 - yearinit) * freq + 1 - end <- start + nleadtime - 1 - var <- array(dim = c(nmemb, length(empty_ts))) - var[, start:end] <- exp_ano[jexp, , jdate, ] - # - # Compute parameters for filling max-min over members - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # - if (fill) { - par(new = TRUE) - bordup <- array(dim = nleadtime) - borddown <- array(dim = nleadtime) - for (jt in 1:nleadtime) { - bordup[jt] <- max(exp_ano[jexp, , jdate, jt], na.rm = TRUE) - borddown[jt] <- min(exp_ano[jexp, , jdate, jt], na.rm = TRUE) - } - tmp <- c(start:end) - xout <- is.na(bordup + borddown) - tmp <- tmp[which(xout == FALSE)] - xx <- c(tmp, rev(tmp)) - bordup <- bordup[which(xout == FALSE)] - borddown <- borddown[which(xout == FALSE)] - yy <- c(bordup, rev(borddown)) - # - # Plotting - # ~~~~~~~~~~ - # - if (jdate == 1) { - matplot(t(var), type = "l", lty = 1, lwd = 1, ylim = c(ll, ul), - col = color[jdate], xlab = "", ylab = "", axes = FALSE) - } - # Max-min member range - polygon(xx, yy, col = colorblock[jdate], border = NA) - } - if (ensmean) { # Ensemble-mean - par(new = TRUE) - if (points) { - plot(Mean1Dim(t(var), 2), type = "p", lty = 1, lwd = 4, - ylim = c(ll, ul), col = color[jdate], xlab = "", ylab = "", - axes = FALSE) - } else { - plot(Mean1Dim(t(var), 2), type = "l", lty = 1, lwd = 4, - ylim = c(ll, ul), col = color[jdate], xlab = "", ylab = "", - axes = FALSE) - } - } - if (memb) { - par(new = TRUE) # All members - if (points) { - matpoints(t(var), type = "p", lty = 1, lwd = 1, pch = 20, - ylim = c(ll, ul), col = color[jdate], xlab = "", ylab = "", - axes = FALSE) - } else { - matplot(t(var), type = "l", lty = 1, lwd = 1, ylim = c(ll, ul), - col = color[jdate], xlab = "", ylab = "", axes = FALSE) - } - } - if (nobs > 0) { - for (jobs in 1:nobs) { - for (jmemb in 1:dim(obs_ano)[2]) { - var <- array(dim = length(empty_ts)) - var[start:end] <- 
obs_ano[jobs, jmemb, jdate, ] - par(new = TRUE) # Observational datasets - if (points) { - plot(var, type = "p", lty = 1, lwd = 4, pch = 20, - ylim = c(ll, ul), col = 1, ylab = "", xlab = "", - axes = FALSE) - } else { - plot(var, lty = type[jobs], lwd = thickness[jobs], type = "l", - ylim = c(ll, ul), col = 1, ylab = "", xlab = "", - axes = FALSE) - } - } - } - } - } - if (linezero) { - abline(h = 0, col = "black") - } - if (is.null(vlines) == FALSE) { - for (x in vlines) { - abline(v = x, col = "black") - } - } - if (is.null(legends) == FALSE) { - if (points) { - legend('topleft', legends[1:nobs], lty = 3, lwd = 10, col = 1, - cex = legsize) - } else { - legend('topleft', ul, legends[1:nobs], lty = type[1:nobs], - lwd = thickness[1:nobs], col = 1, cex = legsize) - } - } - - # If the graphic was saved to file, close the connection with the device - if(!is.null(fileout)) dev.off() - } -} diff --git a/R/PlotBoxWhisker.R b/R/PlotBoxWhisker.R deleted file mode 100644 index 46c5335507e395c5dd281d31e7ea7664c809668a..0000000000000000000000000000000000000000 --- a/R/PlotBoxWhisker.R +++ /dev/null @@ -1,243 +0,0 @@ -#'Box-And-Whisker Plot of Time Series with Ensemble Distribution -#' -#'Produce time series of box-and-whisker plot showing the distribution of the -#'members of a forecast vs. the observed evolution. The correlation between -#'forecast and observational data is calculated and displayed. Only works for -#'n-monthly to n-yearly time series. -#' -#'@param exp Forecast array of multi-member time series, e.g., the NAO index -#' of one experiment. The expected dimensions are -#' c(members, start dates/forecast horizons). A vector with only the time -#' dimension can also be provided. Only monthly or lower frequency time -#' series are supported. See parameter freq. -#'@param obs Observational vector or array of time series, e.g., the NAO index -#' of the observations that correspond the forecast data in \code{exp}. 
-#' The expected dimensions are c(start dates/forecast horizons) or -#' c(1, start dates/forecast horizons). Only monthly or lower frequency time -#' series are supported. See parameter freq. -#'@param toptitle Character string to be drawn as figure title. -#'@param ytitle Character string to be drawn as y-axis title. -#'@param monini Number of the month of the first time step, from 1 to 12. -#'@param yearini Year of the first time step. -#'@param freq Frequency of the provided time series: 1 = yearly, 12 = monthly, -# 4 = seasonal, ... Default = 12. -#'@param expname Experimental dataset name. -#'@param obsname Name of the observational reference dataset. -#'@param drawleg TRUE/FALSE: whether to draw the legend or not. -#'@param fileout Name of output file. Extensions allowed: eps/ps, jpeg, png, -#' pdf, bmp and tiff. \cr -#' Default = 'output_PlotBox.ps'. -#'@param width File width, in the units specified in the parameter size_units -#' (inches by default). Takes 8 by default. -#'@param height File height, in the units specified in the parameter -#' size_units (inches by default). Takes 5 by default. -#'@param size_units Units of the size of the device (file or window) to plot -#' in. Inches ('in') by default. See ?Devices and the creator function of the -#' corresponding device. -#'@param res Resolution of the device (file or window) to plot in. See -#' ?Devices and the creator function of the corresponding device. -#'@param ... Arguments to be passed to the method. Only accepts the following -#' graphical parameters:\cr -#' ann ask bg cex.lab cex.sub cin col.axis col.lab col.main col.sub cra crt -#' csi cxy err family fg fig font font.axis font.lab font.main font.sub lend -#' lheight ljoin lmitre mex mfcol mfrow mfg mkh oma omd omi page pin plt pty -#' smo srt tck tcl usr xaxp xaxs xaxt xlog xpd yaxp yaxs yaxt ylbias ylog \cr -#' For more information about the parameters see `par`. -#' -#'@return Generates a file at the path specified via \code{fileout}. 
-#' -#'@seealso EOF, ProjectField, NAO -#'@keywords datagen -#'@author History:\cr -#'0.1 - 2013-09 (F. Lienert, \email{flienert@@ic3.cat}) - Original code\cr -#'0.2 - 2015-03 (L. Batte, \email{lauriane.batte@@ic3.cat}) - Removed all\cr -#' normalization for sake of clarity. -#'1.0 - 2016-03 (N. Manubens, \email{nicolau.manubens@@bsc.es}) - Formatting to R CRAN -#'@examples -#'# See examples on Load() to understand the first lines in this example -#' \dontrun{ -#'data_path <- system.file('sample_data', package = 's2dverification') -#'expA <- list(name = 'experiment', path = file.path(data_path, -#' 'model/$EXP_NAME$/$STORE_FREQ$_mean/$VAR_NAME$_3hourly', -#' '$VAR_NAME$_$START_DATE$.nc')) -#'obsX <- list(name = 'observation', path = file.path(data_path, -#' '$OBS_NAME$/$STORE_FREQ$_mean/$VAR_NAME$', -#' '$VAR_NAME$_$YEAR$$MONTH$.nc')) -#' -#'# Now we are ready to use Load(). -#'startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') -#'sampleData <- Load('tos', list(expA), list(obsX), startDates, -#' leadtimemin = 1, leadtimemax = 4, output = 'lonlat', -#' latmin = 27, latmax = 48, lonmin = -12, lonmax = 40) -#' } -#' \dontshow{ -#'startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') -#'sampleData <- s2dverification:::.LoadSampleData('tos', c('experiment'), -#' c('observation'), startDates, -#' leadtimemin = 1, -#' leadtimemax = 4, -#' output = 'lonlat', -#' latmin = 20, latmax = 80, -#' lonmin = -80, lonmax = 40) -#'# No example data is available over NAO region, so in this example we will -#'# tweak the available data. In a real use case, one can Load() the data over -#'# NAO region directly. 
-#'sampleData$lon[] <- c(40, 280, 340) -#'attr(sampleData$lon, 'first_lon') <- 280 -#'attr(sampleData$lon, 'last_lon') <- 40 -#'attr(sampleData$lon, 'data_across_gw') <- TRUE -#'sampleData$lat[] <- c(20, 80) -#'attr(sampleData$lat, 'first_lat') <- 20 -#'attr(sampleData$lat, 'last_lat') <- 80 -#' } -#'# Now ready to compute the EOFs and project on, for example, the first -#'# variability mode. -#'ano <- Ano_CrossValid(sampleData$mod, sampleData$obs) -#'nao <- NAO(ano$ano_exp, ano$ano_obs, sampleData$lon, sampleData$lat) -#'# Finally plot the nao index -#' \donttest{ -#'PlotBoxWhisker(nao$NAO_exp, nao$NAO_obs, "NAO index, DJF", "NAO index (PC1) TOS", -#' monini = 12, yearini = 1985, freq = 1, "Exp. A", "Obs. X") -#' } -#' -#'@importFrom grDevices dev.cur dev.new dev.off -#'@importFrom stats cor -#'@export -PlotBoxWhisker <- function(exp, obs, toptitle = '', ytitle = '', monini = 1, - yearini = 0, freq = 1, expname = "exp 1", - obsname = "obs 1", drawleg = TRUE, - fileout = "output_PlotBoxWhisker.ps", - width = 8, height = 5, size_units = 'in', res = 100, ...) { - - # Process the user graphical parameters that may be passed in the call - ## Graphical parameters to exclude - excludedArgs <- c("adj", "bty", "cex", "cex.axis", "cex.main", "col", "din", "fin", "lab", "las", "lty", "lwd", "mai", "mar", "mgp", "new", "pch", "ps") - userArgs <- .FilterUserGraphicArgs(excludedArgs, ...) 
- - # If there is any filenames to store the graphics, process them - # to select the right device - if (!is.null(fileout)) { - deviceInfo <- .SelectDevice(fileout = fileout, width = width, height = height, units = size_units, res = res) - saveToFile <- deviceInfo$fun - fileout <- deviceInfo$files - } - - # Checking exp - if (is.numeric(exp)) { - if (is.null(dim(exp)) || length(dim(exp)) == 1) { - dim(exp) <- c(1, length(exp)) - } - } - if (!is.numeric(exp) || length(dim(exp)) != 2) { - stop("Parameter 'exp' must be a numeric vector or array of dimensions c(forecast horizons/start dates) or c(ensemble members, forecast horizons/start dates)") - } - - # Checking obs - if (is.numeric(obs)) { - if (is.null(dim(obs)) || length(dim(obs)) == 1) { - dim(obs) <- c(1, length(obs)) - } - } - if (!is.numeric(obs) || length(dim(obs)) != 2) { - stop("Parameter 'obs' must be a numeric vector or array of dimensions c(forecast horizons/start dates) or c(1, forecast horizons/start dates)") - } - - # Checking consistency in exp and obs - if (dim(exp)[2] != dim(obs)[2]) { - stop("'exp' and 'obs' must have data for the same amount of time steps.") - } - - if (!is.character(toptitle) || !is.character(ytitle)) { - stop("Parameters 'ytitle' and 'toptitle' must be character strings.") - } - - if (!is.numeric(monini)) { - stop("'monini' must be a month number, from 1 to 12.") - } - if (monini < 1 || monini > 12) { - stop("'monini' must be >= 1 and <= 12.") - } - - if (!is.numeric(yearini)) { - stop("'yearini' must be a month number, from 1 to 12.") - } - - if (!is.numeric(freq)) { - stop("'freq' must be a number <= 12.") - } - - if (!is.character(expname) || !is.character(obsname)) { - stop("'expname' and 'obsname' must be character strings.") - } - - if (!is.logical(drawleg)) { - stop("Parameter 'drawleg' must be either TRUE or FALSE.") - } - - if (!is.character(fileout) && !is.null(fileout)) { - stop("Parameter 'fileout' must be a character string.") - } - - ntimesteps <- dim(exp)[2] - 
lastyear <- (monini + (ntimesteps - 1) * 12 / freq - 1) %/% 12 + yearini - lastmonth <- (monini + (ntimesteps - 1) * 12 / freq - 1) %% 12 + 1 - # - # Define some plot parameters - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # - labind <- seq(1, ntimesteps) - months <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", - "Oct", "Nov", "Dec") - labyear <- ((labind - 1) * 12 / freq + monini - 1) %/% 12 + yearini - labmonth <- months[((labind - 1) * 12 / freq + monini - 1) %% 12 + 1] - for (jx in 1:length(labmonth)) { - y2o3dig <- paste("0", as.character(labyear[jx]), sep = "") - labmonth[jx] <- paste(labmonth[jx], "\nYr ", substr(y2o3dig, - nchar(y2o3dig) - 1, nchar(y2o3dig)), sep = "") - } - - # Open connection to graphical device - if (!is.null(fileout)) { - saveToFile(fileout) - } else if (names(dev.cur()) == 'null device') { - dev.new(units = size_units, res = res, width = width, height = height) - } - - # Load the user parameters - par(userArgs) - - ## Observed time series. - #pc.o <- ts(obs[1, ], deltat = 1, start = yr1, end = yr2) - pc.o <- obs[1, ] - ## Normalization of obs, forecast members. Fabian - ## Normalization of forecast should be according to ensemble - ## mean, to keep info on ensemble spread, no? Lauriane pc.o <- - ## pc.o/sd(pc.o) sd.fc <- apply(exp,c(1),sd) - ## exp <- exp/sd.fc mn.fc <- - ## apply(exp,2, mean) exp <- - ## exp/sd(mn.fc) Produce plot. 
- par(mar = c(5, 6, 4, 2)) - boxplot(exp, add = FALSE, main = toptitle, - ylab = "", xlab = "", col = "red", lwd = 2, t = "b", - axes = FALSE, cex.main = 2, ylim = c(-max(abs(c(exp, pc.o))), max(abs(c(exp, pc.o))))) - lines(1:ntimesteps, pc.o, lwd = 3, col = "blue") - abline(h = 0, lty = 1) - if (drawleg) { - legend("bottomleft", c(obsname, expname), lty = c(1, 1), lwd = c(3, - 3), pch = c(NA, NA), col = c("blue", "red"), horiz = FALSE, - bty = "n", inset = 0.05) - } - ##mtext(1, line = 3, text = tar, cex = 1.9) - mtext(3, line = -2, text = paste(" AC =", round(cor(pc.o, - apply(exp, c(2), mean)), 2)), cex = 1.9, adj = 0) - axis(2, cex.axis = 2) - mtext(2, line = 3, text = ytitle, cex = 1.9) - par(mgp = c(0, 4, 0)) - ##axis(1, c(1:ntimesteps), NA, cex.axis = 2) - axis(1, seq(1, ntimesteps, by = 1), labmonth, cex.axis = 2) - box() - - # If the graphic was saved to file, close the connection with the device - if(!is.null(fileout)) dev.off() -} - diff --git a/R/PlotClim.R b/R/PlotClim.R index a002429f5f80d1a11692ec1624a58d225f08d083..45eb2ce3e81823e0f1c23c7200001714d18d7fe9 100644 --- a/R/PlotClim.R +++ b/R/PlotClim.R @@ -41,10 +41,6 @@ #' smo srt tck usr xaxp xaxs xaxt xlog xpd yaxp yaxs yaxt ylbias ylog \cr #' For more information about the parameters see `par`. #' -#'@keywords datagen -#'@author History:\cr -#'0.1 - 2011-03 (V. Guemas, \email{virginie.guemas@@ic3.cat}) - Original code\cr -#'1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@@ic3.cat}) - Formatting to CRAN #'@examples #'# Load sample data as in Load() example: #'example(Load) diff --git a/R/PlotEquiMap.R b/R/PlotEquiMap.R index 8f3fcb9ab0f754dc7184784d9a4050e212a22e08..37847dcc40166713a346b3859c38a75f85699fcc 100644 --- a/R/PlotEquiMap.R +++ b/R/PlotEquiMap.R @@ -173,18 +173,10 @@ #' drawn at all). #'} #' -#'@keywords dynamic -#'@author History:\cr -#' 0.1 - 2011-11 (V. Guemas, \email{virginie.guemas@@ic3.cat}) - Original code\cr -#' 0.2 - 2013-04 (R. 
Saurral \email{ramiro.saurral@@ic3.cat}) - LabW\cr -#' 1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@@ic3.cat}) - Formatting to R CRAN\cr -#' 1.1 - 2013-09 (C. Prodhomme, \email{chloe.prodhomme@@ic3.cat}) - add winds\cr -#' 1.2 - 2016-08 (N. Manubens, \email{nicolau.manubens@@bsc.es}) - Refactored and added features, -#' and adapted to new ColorBar. #'@examples #'# See examples on Load() to understand the first lines in this example #' \dontrun{ -#'data_path <- system.file('sample_data', package = 's2dverification') +#'data_path <- system.file('sample_data', package = 's2dv') #'expA <- list(name = 'experiment', path = file.path(data_path, #' 'model/$EXP_NAME$/$STORE_FREQ$_mean/$VAR_NAME$_3hourly', #' '$VAR_NAME$_$START_DATE$.nc')) @@ -200,7 +192,7 @@ #' } #' \dontshow{ #'startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') -#'sampleData <- s2dverification:::.LoadSampleData('tos', c('experiment'), +#'sampleData <- s2dv:::.LoadSampleData('tos', c('experiment'), #' c('observation'), startDates, #' leadtimemin = 1, #' leadtimemax = 4, diff --git a/R/PlotLayout.R b/R/PlotLayout.R index 0e10c3041b72e55f9ca7690060549e68a26a4a6c..0d333cb6a5a67dd3479752c3b4beacaa1ecc948d 100644 --- a/R/PlotLayout.R +++ b/R/PlotLayout.R @@ -3,11 +3,11 @@ #'This function takes an array or list of arrays and loops over each of them #'to plot all the sub-arrays they contain on an automatically generated #'multi-pannel layout. A different plot function (not necessarily from -#'s2dverification) can be applied over each of the provided arrays. The input +#'s2dv) can be applied over each of the provided arrays. The input #'dimensions of each of the functions have to be specified, either with the #'names or the indices of the corresponding input dimensions. It is possible #'to draw a common colour bar at any of the sides of the multi-pannel for all -#'the s2dverification plots that use a colour bar. Common plotting arguments +#'the s2dv plots that use a colour bar. 
Common plotting arguments #'for all the arrays in 'var' can be specified via the '...' parameter, and #'specific plotting arguments for each array can be fully adjusted via #''special_args'. It is possible to draw titles for each of the figures, @@ -161,13 +161,10 @@ #' cells as current figure to add plot elements. See .SwitchToFigure. #'} #' -#'@keywords dynamic -#'@author History:\cr -#' 0.1 - 2016-08 (N. Manubens, \email{nicolau.manubens@@bsc.es}) - Original code #'@examples #'# See examples on Load() to understand the first lines in this example #' \dontrun{ -#'data_path <- system.file('sample_data', package = 's2dverification') +#'data_path <- system.file('sample_data', package = 's2dv') #'expA <- list(name = 'experiment', path = file.path(data_path, #' 'model/$EXP_NAME$/$STORE_FREQ$_mean/$VAR_NAME$_3hourly', #' '$VAR_NAME$_$START_DATE$.nc')) @@ -183,7 +180,7 @@ #' } #' \dontshow{ #'startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') -#'sampleData <- s2dverification:::.LoadSampleData('tos', c('experiment'), +#'sampleData <- s2dv:::.LoadSampleData('tos', c('experiment'), #' c('observation'), startDates, #' leadtimemin = 1, #' leadtimemax = 4, diff --git a/R/PlotMatrix.R b/R/PlotMatrix.R index 8c830d74b4761f1f3d19a98f4f5a3cbef271d5f6..269d059c33802b67af32b064b129eef600f7d104 100644 --- a/R/PlotMatrix.R +++ b/R/PlotMatrix.R @@ -45,7 +45,7 @@ #'@param res A positive number indicating resolution of the device (file or window) #' to plot in. See ?Devices and the creator function of the corresponding device. #'@param ... The additional parameters to be passed to function ColorBar() in -#' s2dverification for color legend creation. +#' s2dv for color legend creation. #'@return A figure in popup window by default, or saved to the specified path. 
#' #'@examples diff --git a/R/PlotSection.R b/R/PlotSection.R index 46a1e70a54b5713f267d9896fbd41cc3174543df..29bdd4929e2495b40830ec70dd663c101be47c11 100644 --- a/R/PlotSection.R +++ b/R/PlotSection.R @@ -36,12 +36,8 @@ #' yaxt ylbias ylog \cr #' For more information about the parameters see `par`. #' -#'@keywords dynamic -#'@author History:\cr -#'0.1 - 2012-09 (V. Guemas, \email{virginie.guemas@@ic3.cat}) - Original code\cr -#'1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@@ic3.cat}) - Formatting to CRAN #'@examples -#'sampleData <- s2dverification::sampleDepthData +#'sampleData <- s2dv::sampleDepthData #'PlotSection(sampleData$mod[1, 1, 1, 1, , ], sampleData$lat, sampleData$depth, #' toptitle = 'temperature 1995-11 member 0') #'@importFrom grDevices dev.cur dev.new dev.off rainbow diff --git a/R/PlotStereoMap.R b/R/PlotStereoMap.R index 5572f89a56b3fedb77abcf753b92f85420dca151..d4e8e2d243024b18eb0d0cf510772b8f245c486b 100644 --- a/R/PlotStereoMap.R +++ b/R/PlotStereoMap.R @@ -127,13 +127,6 @@ #' drawn at all). #'} #' -#'@keywords dynamic -#'@author History:\cr -#'1.0 - 2014-07 (V. Guemas, \email{virginie.guemas@@ic3.cat}) - Original code\cr -#'1.1 - 2015-12 (C. Ardilouze, \email{constantin.ardilouze@@meteo.fr}) - Box(es) drawing\cr -#'1.2 - 2016-08 (N. Manubens, \email{nicolau.manubens@@bsc.es}) - Refacotred the function and -#' merged in Jean-Philippe circle -#' border and Constantin boxes. 
#'@examples #'data <- matrix(rnorm(100 * 50), 100, 50) #'x <- seq(from = 0, to = 360, length.out = 100) diff --git a/R/PlotVsLTime.R b/R/PlotVsLTime.R deleted file mode 100644 index 8920438ca315a4be531eb9ba72584df8b33dfb1d..0000000000000000000000000000000000000000 --- a/R/PlotVsLTime.R +++ /dev/null @@ -1,271 +0,0 @@ -#'Plots A Score Along The Forecast Time With Its Confidence Interval -#' -#'Plots The Correlation (\code{Corr()}) or the Root Mean Square Error -#'(\code{RMS()}) between the forecasted values and their observational -#'counterpart or the slopes of their trends (\code{Trend()}) or the -#'InterQuartile Range, Maximum-Mininum, Standard Deviation or Median Absolute -#'Deviation of the Ensemble Members (\code{Spread()}), or the ratio between -#'the Ensemble Spread and the RMSE of the Ensemble Mean (\code{RatioSDRMS()}) -#'along the forecast time for all the input experiments on the same figure -#'with their confidence intervals. -#' -#'@param var Matrix containing any Prediction Score with dimensions:\cr -#' (nexp/nmod, 3/4 ,nltime)\cr -#' or (nexp/nmod, nobs, 3/4 ,nltime). -#'@param toptitle Main title, optional. -#'@param ytitle Title of Y-axis, optional. -#'@param monini Starting month between 1 and 12. Default = 1. -#'@param freq 1 = yearly, 12 = monthly, 4 = seasonal, ... Default = 12. -#'@param nticks Number of ticks and labels on the x-axis, optional. -#'@param limits c(lower limit, upper limit): limits of the Y-axis, optional. -#'@param listexp List of experiment names, optional. -#'@param listobs List of observation names, optional. -#'@param biglab TRUE/FALSE for presentation/paper plot. Default = FALSE. -#'@param hlines c(a,b, ..) Add horizontal black lines at Y-positions a,b, ...\cr -#' Default = NULL. -#'@param leg TRUE/FALSE if legend should be added or not to the plot. -#' Default = TRUE. -#'@param siglev TRUE/FALSE if significance level should replace confidence -#' interval.\cr -#' Default = FALSE. 
-#'@param sizetit Multiplicative factor to change title size, optional. -#'@param show_conf TRUE/FALSE to show/not confidence intervals for input -#' variables. -#'@param fileout Name of output file. Extensions allowed: eps/ps, jpeg, png, -#' pdf, bmp and tiff.\cr -#' Default = 'output_plotvsltime.eps' -#'@param width File width, in the units specified in the parameter size_units -#' (inches by default). Takes 8 by default. -#'@param height File height, in the units specified in the parameter -#' size_units (inches by default). Takes 5 by default. -#'@param size_units Units of the size of the device (file or window) to plot -#' in. Inches ('in') by default. See ?Devices and the creator function of the -#' corresponding device. -#'@param res Resolution of the device (file or window) to plot in. See -#' ?Devices and the creator function of the corresponding device. -#'@param ... Arguments to be passed to the method. Only accepts the following -#' graphical parameters:\cr -#' adj ann ask bg bty cex.sub cin col.axis col.lab col.main col.sub cra crt -#' csi cxy err family fg fig font font.axis font.lab font.main font.sub -#' lheight ljoin lmitre mar mex mfcol mfrow mfg mkh oma omd omi page pch plt -#' smo srt tck tcl usr xaxp xaxs xaxt xlog xpd yaxp yaxs yaxt ylbias ylog \cr -#' For more information about the parameters see `par`. -#' -#'@details -#'Examples of input:\cr -#'Model and observed output from \code{Load()} then \code{Clim()} then -#'\code{Ano()} then \code{Smoothing()}:\cr -#'(nmod, nmemb, nsdate, nltime) and (nobs, nmemb, nsdate, nltime)\cr -#'then averaged over the members\cr -#'\code{Mean1Dim(var_exp/var_obs, posdim = 2)}:\cr -#'(nmod, nsdate, nltime) and (nobs, nsdate, nltime)\cr -#'then passed through\cr -#' \code{Corr(exp, obs, posloop = 1, poscor = 2)} or\cr -#' \code{RMS(exp, obs, posloop = 1, posRMS = 2)}:\cr -#' (nmod, nobs, 3, nltime)\cr -#'would plot the correlations or RMS between each exp & each obs as a function -#'of the forecast time. 
-#' -#'@keywords dynamic -#'@author History:\cr -#'0.1 - 2011-03 (V. Guemas, \email{virginie.guemas@@ic3.cat}) - Original code\cr -#'0.2 - 2013-03 (I. Andreu-Burillo, \email{isabel.andreu-burillo@@ic3.cat}) - Introduced parameter sizetit\cr -#'0.3 - 2013-10 (I. Andreu-Burillo, \email{isabel.andreu-burillo@@ic3.cat}) - Introduced parameter show_conf\cr -#'1.0 - 2013-11 (N. Manubens, \email{nicolau.manubens@@ic3.cat}) - Formatting to CRAN -#'@examples -#'# Load sample data as in Load() example: -#'example(Load) -#'clim <- Clim(sampleData$mod, sampleData$obs) -#'ano_exp <- Ano(sampleData$mod, clim$clim_exp) -#'ano_obs <- Ano(sampleData$obs, clim$clim_obs) -#'runmean_months <- 12 -#'dim_to_smooth <- 4 # Smooth along lead-times -#'smooth_ano_exp <- Smoothing(ano_exp, runmean_months, dim_to_smooth) -#'smooth_ano_obs <- Smoothing(ano_obs, runmean_months, dim_to_smooth) -#'dim_to_mean <- 2 # Mean along members -#'required_complete_row <- 3 # Discard startdates for which there are NA leadtimes -#'leadtimes_per_startdate <- 60 -#'corr <- Corr(Mean1Dim(smooth_ano_exp, dim_to_mean), -#' Mean1Dim(smooth_ano_obs, dim_to_mean), -#' compROW = required_complete_row, -#' limits = c(ceiling((runmean_months + 1) / 2), -#' leadtimes_per_startdate - floor(runmean_months / 2))) -#' \donttest{ -#'PlotVsLTime(corr, toptitle = "correlations", ytitle = "correlation", -#' monini = 11, limits = c(-1, 2), listexp = c('CMIP5 IC3'), -#' listobs = c('ERSST'), biglab = FALSE, hlines = c(-1, 0, 1), -#' fileout = 'tos_cor.eps') -#' } -#' -#'@importFrom grDevices dev.cur dev.new dev.off -#'@importFrom stats ts -#'@export -PlotVsLTime <- function(var, toptitle = '', ytitle = '', monini = 1, freq = 12, - nticks = NULL, limits = NULL, - listexp = c('exp1', 'exp2', 'exp3'), - listobs = c('obs1', 'obs2', 'obs3'), biglab = FALSE, hlines = NULL, - leg = TRUE, siglev = FALSE, sizetit = 1, show_conf = TRUE, - fileout = 'output_plotvsltime.eps', - width = 8, height = 5, size_units = 'in', res = 100, ...) 
{ - # Process the user graphical parameters that may be passed in the call - ## Graphical parameters to exclude - excludedArgs <- c("cex", "cex.axis", "cex.lab", "cex.main", "col", "fin", "lab", "las", "lend", "lty", "lwd", "mai", "mgp", "new", "pin", "ps", "pty") - userArgs <- .FilterUserGraphicArgs(excludedArgs, ...) - - # If there is any filenames to store the graphics, process them - # to select the right device - if (!is.null(fileout)) { - deviceInfo <- .SelectDevice(fileout = fileout, width = width, height = height, units = size_units, res = res) - saveToFile <- deviceInfo$fun - fileout <- deviceInfo$files - } - - # - # Get some arguments - # ~~~~~~~~~~~~~~~~~~~~ - # - if (length(dim(var)) == 3) { - var <- InsertDim(var, posdim = 2, lendim = 1) - } else if (length(dim(var)) != 4) { - stop("Parameter 'var' should have 3 or 4 dimensions: c(n. exp[, n. obs], 3/4, n. lead-times)") - } - nleadtime <- dim(var)[4] - nexp <- dim(var)[1] - nobs <- dim(var)[2] - if (is.null(limits) == TRUE) { - if (all(is.na(var > 0))) { - ll <- ul <- 0 - } else { - ll <- min(var, na.rm = TRUE) - ul <- max(var, na.rm = TRUE) - } - if (biglab) { - ul <- ul + 0.4 * (ul - ll) - } else { - ul <- ul + 0.3 * (ul - ll) - } - } else { - ll <- limits[1] - ul <- limits[2] - } - lastyear <- (monini + (nleadtime - 1) * 12 / freq - 1) %/% 12 - lastmonth <- (monini + (nleadtime - 1) * 12 / freq - 1) %% 12 + 1 - empty_ts <- ts(start = c(0000, (monini - 1) %/% (12 / freq) + 1), - end = c(lastyear, (lastmonth - 1) %/% (12 / freq) + 1), - frequency = freq) - empty <- array(dim = length(empty_ts)) - # - # Define some plot parameters - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # - if (is.null(nticks)) { - if (biglab) { - nticks <- 5 - } else { - nticks <- 10 - } - } - labind <- seq(1, nleadtime, max(nleadtime %/% nticks, 1)) - months <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", - "Oct", "Nov", "Dec") - labyear <- ((labind - 1) * 12 / freq + monini - 1) %/% 12 - labmonth <- months[((labind - 
1) * 12 / freq + monini -1 ) %% 12 + 1] - for (jx in 1:length(labmonth)) { - y2o3dig <- paste("0", as.character(labyear[jx]), sep = "") - labmonth[jx] <- paste(labmonth[jx], "\nYr ", substr(y2o3dig, nchar(y2o3dig) - - 1, nchar(y2o3dig)), sep = "") - } - color <- c("red1", "dodgerblue1", "green1", "orange1", "lightblue1", - "deeppink1", "mediumpurple1", "lightgoldenrod1", "olivedrab1", - "mediumorchid1") - type <- c(1, 3, 2, 4) - thickness <- array(dim = c(4, 4)) - thickness[, 1] <- c(1, 2, 1, 1.5) - thickness[, 2] <- c(8, 12, 8, 10) - thickness[, 3] <- thickness[, 1] - thickness[, 4] <- c(4, 6, 4, 5) - if (siglev == TRUE) { - lines <- c("n", "l", "n", "l") - } else { - lines <- c("l", "l", "l", "n") - } - # - # Define plot layout - # ~~~~~~~~~~~~~~~~~~~~ - # - - # Open connection to graphical device - if (!is.null(fileout)) { - saveToFile(fileout) - } else if (names(dev.cur()) == 'null device') { - dev.new(units = size_units, res = res, width = width, height = height) - } - - # Load the user parameters - par(userArgs) - - if (biglab) { - par(mai = c(1.25, 1.4, 0.5, 1), mgp = c(4, 2.5, 0)) - par(cex = 1.3, cex.lab = 2, cex.axis = 1.8) - cexmain <- 2.2 - legsize <- 1.5 - } else { - par(mai = c(1, 1.1, 0.5, 0), mgp = c(3, 1.8, 0)) - par(cex = 1.3, cex.lab = 1.5, cex.axis = 1.1) - cexmain <- 1.5 - legsize <- 1 - } - plot(empty, ylim = c(ll, ul), xlab = "Time (months)", ylab = ytitle, - main = toptitle, cex.main = cexmain*sizetit, axes = FALSE) - axis(1, at = labind, labels = labmonth) - axis(2) - box() - if (is.null(hlines) != TRUE) { - for (jy in 1:length(hlines)) { - par(new = TRUE) - abline(h = hlines[jy]) - } - } - # - # Loop on experimental & observational data - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # - legendnames <- array(dim = nobs * nexp) - legendthick <- array(dim = nobs * nexp) - legendsty <- array(dim = nobs * nexp) - legendcol <- array(dim = nobs * nexp) - ind <- 1 - if (show_conf == TRUE) { - start_line <- dim(var)[3] - end_line <- 1 - } else { 
- start_line <- 2 - end_line <- 2 - } - for (jt in seq(start_line, end_line, -1)) { - ind <- 1 - for (jexp in 1:nexp) { - for (jobs in 1:nobs) { - par(new = TRUE) - plot(var[jexp, jobs, jt, ], type = lines[jt], ylim = c(ll, ul), - col = color[jexp], lty = type[jobs], lwd = thickness[jobs, jt], - ylab = "", xlab = "", axes = FALSE) - legendnames[ind] <- paste(listexp[jexp], 'vs', listobs[jobs]) - legendthick[ind] <- thickness[jobs, 1] * 3 - legendsty[ind] <- type[jobs] - legendcol[ind] <- color[jexp] - ind <- ind + 1 - } - } - } - if (leg) { - if (nobs == 1) { - legendnames <- listexp[1:nexp] - } - legend(1, ul, legendnames, lty = legendsty, lwd = legendthick, - col = legendcol, cex = legsize) - } - - # If the graphic was saved to file, close the connection with the device - if(!is.null(fileout)) dev.off() -} diff --git a/R/RMS.R b/R/RMS.R index d4555d2bb48fa94cc7476b5e1521512711e1d750..cc522f59af5891df72c6db2b972e38a0f276a03b 100644 --- a/R/RMS.R +++ b/R/RMS.R @@ -45,33 +45,21 @@ #' The upper confidence interval. Only present if \code{conf = TRUE}. #'} #' -#'@keywords datagen -#'@author History:\cr -#'0.1 - 2011-05 (V. Guemas, \email{vguemas@ic3.cat}) - Original code\cr -#'1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens2@ic3.cat}) - Formatting to R CRAN\cr -#'1.1 - 2017-02 (A. Hunter, \email{alasdair.hunter@bsc.es}) - Adapted to veriApply() -#'3.0 - 2020-01 (A. 
Ho, \email{an.ho@bsc.es}) - Adapt multiApply feature #'@examples #'# Load sample data as in Load() example: -#'example(Load) -#'clim <- Clim(sampleData$mod, sampleData$obs) -#'ano_exp <- Ano(sampleData$mod, clim$clim_exp) -#'ano_obs <- Ano(sampleData$obs, clim$clim_obs) -#'runmean_months <- 12 -#'dim_to_smooth <- 4 # Smooth along lead-times -#'smooth_ano_exp <- Smoothing(ano_exp, runmean_months, dim_to_smooth) -#'smooth_ano_obs <- Smoothing(ano_obs, runmean_months, dim_to_smooth) -#'dim_to_mean <- 2 # Mean along members -#'# Discard start-dates for which some leadtimes are missing -#'leadtimes_per_startdate <- 60 -#'rms <- RMS(smooth_ano_exp, -#' smooth_ano_obs, -#' comp_dim = 'ftime', -#' limits = c(ceiling((runmean_months + 1) / 2), -#' leadtimes_per_startdate - floor(runmean_months / 2))) +#' set.seed(1) +#' exp1 <- array(rnorm(120), dim = c(member = 3, sdate = 5, ftime = 2, lon = 1, lat = 4)) +#' set.seed(2) +#' obs1 <- array(rnorm(80), dim = c(member = 2, sdate = 5, ftime = 2, lon = 1, lat = 4)) +#' set.seed(2) +#' na <- floor(runif(10, min = 1, max = 80)) +#' obs1[na] <- NA +#' res <- RMS(exp1, obs1, comp_dim = 'ftime') +#' # Renew example when Ano and Smoothing are ready #' #'@rdname RMS #'@import multiApply +#'@importFrom ClimProjDiags Subset #'@export RMS <- function(exp, obs, time_dim = 'sdate', memb_dim = 'member', comp_dim = NULL, limits = NULL, diff --git a/R/RMSSS.R b/R/RMSSS.R index d59e0af271ec6168477d5b2e884fa16b5f34505b..fa5cca937329d4462f0b2b911c918dc72d306d8a 100644 --- a/R/RMSSS.R +++ b/R/RMSSS.R @@ -40,12 +40,6 @@ #' The p-value. Only present if \code{pval = TRUE}. #'} #' -#'@keywords datagen -#'@author History:\cr -#'0.1 - 2012-04 (V. Guemas, \email{vguemas@bsc.es}) - Original code\cr -#'1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Formatting to R CRAN\cr -#'1.1 - 2017-02 (A. Hunter, \email{alasdair.hunter@bsc.es}) - Adapted to veriApply() -#'3.0 - 2019-12 (A. 
Ho, \email{an.ho@bsc.es}) - Adopt multiApply feature #'@examples #' set.seed(1) #' exp <- array(rnorm(15), dim = c(dat = 1, time = 3, member = 5)) diff --git a/R/Regression.R b/R/Regression.R index a0c38747762544c792aabefebbea364518a8616a..98ef4d2d8f6766d54ef0d7a7c8dc155981c7934c 100644 --- a/R/Regression.R +++ b/R/Regression.R @@ -31,6 +31,7 @@ #' #'@import multiApply #'@return +#'A list containing: #'\item{$regression}{ #' A numeric array with same dimensions as parameter 'datay' and 'datax' except #' the 'time_dim' dimension, which is replaced by a 'stats' dimension containing @@ -64,18 +65,13 @@ #' dimension. #'} #' -#'@keywords datagen -#'@author History:\cr -#'0.1 - 2013-05 (V. Guemas, \email{virginie.guemas@ic3.cat}) - Original code\cr -#'1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to CRAN -#'2.0 - 2019-10 (N. Perez-Zanon, \email{nuria.perez@bsc.es}) - Formatting to multiApply -#'3.0 - 2020-01 (A. Ho, \email{an.ho@bsc.es}) - Adapt multiApply feature #'@examples #'# Load sample data as in Load() example: #'example(Load) -#'datay <- sampleData$mod -#'datax <- sampleData$obs -#'datay <- Subset(datay, 'member', 2) +#'datay <- sampleData$mod[, 1, , ] +#'names(dim(datay)) <- c('sdate', 'ftime') +#'datax <- sampleData$obs[, 1, , ] +#'names(dim(datax)) <- c('sdate', 'ftime') #'res1 <- Regression(datay, datax, formula = y~poly(x, 2, raw = TRUE)) #'res2 <- Regression(datay, datax, conf.lev = 0.9) #' diff --git a/R/Reorder.R b/R/Reorder.R index 4da21740e3b248e2a6c573014c8584c2882da8ed..8a248e936a5b9ab8f821690e2712064434a41142 100644 --- a/R/Reorder.R +++ b/R/Reorder.R @@ -3,14 +3,12 @@ #'Reorder the dimension order of a multi-dimensional array #' #'@param data An array of which the dimension to be reordered. -#'@param posdim An integer indicating the position of the new dimension. -#'@param lendim An integer indicating the length of the new dimension. 
+#'@param order A vector of indices or character strings indicating the new +#' order of the dimension. #' #'@return An array which has the same values as parameter 'data' but with #' different dimension order. #' -#'@keywords datagen -#'@author History:\cr #'@examples #' dat1 <- array(c(1:30), dim = c(dat = 1, sdate = 3, ftime = 2, lon = 5)) #' print(dim(Reorder(dat1, c(2, 1, 4, 3)))) diff --git a/R/Season.R b/R/Season.R index 5ba97869d055677454a1960fccd0317b67b10c38..8de341535fb327903df33d5fda227788f809ef3f 100644 --- a/R/Season.R +++ b/R/Season.R @@ -24,7 +24,6 @@ #'@return An array with the same dimensions as data except along the 'time_dim' #' dimension, of which the length changes to the number of seasons. #' -#'@import multiApply #'@examples #'set.seed(1) #'dat1 <- array(rnorm(144*3), dim = c(member = 2, sdate = 12*3, ftime = 2, lon = 3)) diff --git a/R/ToyModel.R b/R/ToyModel.R index 4b06facf23208e31dfbe24aaf9b121e1d181fb9a..0919a2ef67eb7933f812a41c3aafb0c0e42bbb61 100644 --- a/R/ToyModel.R +++ b/R/ToyModel.R @@ -46,10 +46,6 @@ #' observations. 
The dimensions correspond to #' c(length(gamma), nmemb, nstartd, nleadt) #' -#'@keywords datagen -#'@author History:\cr -#'1.0 - 2014-08 (O.Bellprat) - Original code -#'1.1 - 2016-02 (O.Bellprat) - Include security check for parameters #'@examples #'# Example 1: Generate forecast with artifical observations #'# Seasonal prediction example @@ -67,7 +63,7 @@ #'# Example 2: Generate forecast from loaded observations #'# Decadal prediction example #' \dontrun{ -#'data_path <- system.file('sample_data', package = 's2dverification') +#'data_path <- system.file('sample_data', package = 's2dv') #'expA <- list(name = 'experiment', path = file.path(data_path, #' 'model/$EXP_NAME$/$STORE_FREQ$_mean/$VAR_NAME$_3hourly', #' '$VAR_NAME$_$START_DATE$.nc')) @@ -83,7 +79,7 @@ #' } #' \dontshow{ #'startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') -#'sampleData <- s2dverification:::.LoadSampleData('tos', c('experiment'), +#'sampleData <- s2dv:::.LoadSampleData('tos', c('experiment'), #' c('observation'), startDates, #' output = 'areave', #' latmin = 27, latmax = 48, @@ -178,7 +174,7 @@ ToyModel <- function(alpha = 0.1, beta = 0.4, gamma = 1, sig = 1, nleadt)) # Allocate observations and forecast according to - # s2dverification standards + # s2dv standards for (j in 1:nstartd) { for (f in 1:nleadt) { for (g in 1:length(gamma)) { diff --git a/R/Trend.R b/R/Trend.R index e970f8997859bc768a0137dd945b0d6d53294e27..c16b3c31c37cbae925563439ef33aefc4db60cf4 100644 --- a/R/Trend.R +++ b/R/Trend.R @@ -21,6 +21,7 @@ #' computation. The default value is NULL. #' #'@return +#'A list containing: #'\item{$trend}{ #' A numeric array with the first dimension 'stats', followed by the same #' dimensions as parameter 'data' except the 'time_dim' dimension. The length @@ -49,12 +50,6 @@ #' detrended values along the 'time_dim' dimension. #'} #' -#'@keywords datagen -#'@author History:\cr -#'0.1 - 2011-05 (V. 
Guemas, \email{virginie.guemas@@ic3.cat}) - Original code\cr -#'1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@@ic3.cat}) - Formatting to CRAN\cr -#'2.0 - 2017-02 (A. Hunter, \email{alasdair.hunter@@bsc.es}) - Adapt to veriApply() -#'3.0 - 2019-12 (A. Ho, \email{an.ho@bsc.es}) - Adapt multiApply feature #'@examples #'# Load sample data as in Load() example: #'example(Load) diff --git a/R/Utils.R b/R/Utils.R index 6bb8c51a2f27494eca33ce2ca3e896b175340ef6..bb344268f2d5a2640a73e979f641c3e4a7f59cb8 100644 --- a/R/Utils.R +++ b/R/Utils.R @@ -1,3 +1,7 @@ +#'@importFrom abind abind +#'@importFrom plyr take +#'@import ncdf4 + ## Function to tell if a regexpr() match is a complete match to a specified name .IsFullMatch <- function(x, name) { ifelse(x > 0 && attributes(x)$match.length == nchar(name), TRUE, FALSE) @@ -876,11 +880,11 @@ weights <- weights / mean(weights, na.rm = TRUE) mean(x * weights, na.rm = TRUE) } else { - weights <- weights / InsertDim(Mean1Dim(weights, 2, narm = TRUE), 2, length(final_lats)) - Mean1Dim(x * weights, 2, narm = TRUE) + weights <- weights / InsertDim(MeanDims(weights, 2, na.rm = TRUE), 2, length(final_lats)) + MeanDims(x * weights, 2, na.rm = TRUE) } } else if (output == 'lat') { - Mean1Dim(x, 1, narm = TRUE) + MeanDims(x, 1, na.rm = TRUE) } else if (output == 'lonlat') { signif(x, 5) } @@ -972,7 +976,7 @@ lead_times_position <- 4 if (output == 'lonlat') { - sampleData <- s2dverification::sampleMap + sampleData <- s2dv::sampleMap if (is.null(leadtimemax)) { leadtimemax <- dim(sampleData$mod)[lead_times_position] } @@ -983,7 +987,7 @@ dataOut$obs <- sampleData$obs[, , selected_start_dates, selected_lead_times, , ] } else if (output == 'areave') { - sampleData <- s2dverification::sampleTimeSeries + sampleData <- s2dv::sampleTimeSeries if (is.null(leadtimemax)) { leadtimemax <- dim(sampleData$mod)[lead_times_position] } diff --git a/R/clim.palette.R b/R/clim.palette.R index 
d18dab165919da9b84e85ca06225ec68487c0c05..e847ad3a983e11614997c675203b723e6f47b22e 100644 --- a/R/clim.palette.R +++ b/R/clim.palette.R @@ -9,9 +9,6 @@ #' to red ('redyellow'). #'@param n Number of colors to generate. #' -#'@keywords datagen -#'@author History:\cr -#'0.0 - 2016-01 (N. Manubens, \email{nicolau.manubens@@bsc.es}) - Original code. #'@examples #'lims <- seq(-1, 1, length.out = 21) #' diff --git a/R/s2dv-package.R b/R/s2dv-package.R new file mode 100644 index 0000000000000000000000000000000000000000..28b1493975bcf6533e939cefb37eb2b5454f7e44 --- /dev/null +++ b/R/s2dv-package.R @@ -0,0 +1,3 @@ +#'@references \url{https://earth.bsc.es/gitlab/es/s2dverification/} +#'@keywords internal +"_PACKAGE" diff --git a/R/sampleDepthData.R b/R/sampleDepthData.R new file mode 100644 index 0000000000000000000000000000000000000000..af1e9254a187328731d82eca1db7132f187a3d4a --- /dev/null +++ b/R/sampleDepthData.R @@ -0,0 +1,26 @@ +#'Sample of Experimental Data for Forecast Verification In Function Of +#'Latitudes And Depths +#' +#'This data set provides data in function of latitudes and depths for the +#'variable 'tos', i.e. sea surface temperature, from the decadal climate +#'prediction experiment run at IC3 in the context of the CMIP5 project.\cr +#'Its name within IC3 local database is 'i00k'. 
+#' +#'@usage data(sampleDepthData) +#'@format The data set provides with a variable named 'sampleDepthData'.\cr\cr +#' +#'sampleDepthData$exp is an array that contains the experimental data and the +#'dimension meanings and values are:\cr +#' c(# of experimental datasets, # of members, # of starting dates, +#' # of lead-times, # of depths, # of latitudes)\cr +#' c(1, 5, 3, 60, 7, 21)\cr\cr +#' +#'sampleDepthData$obs should be an array that contained the observational data +#'but in this sample is not defined (NULL).\cr\cr +#' +#'sampleDepthData$depths is an array with the 7 depths covered by the data.\cr\cr +#' +#'sampleDepthData$lat is an array with the 21 latitudes covered by the data.\cr\cr +#'@name sampleDepthData +#'@docType data +sampleDepthData <- function(){} diff --git a/R/sampleMap.R b/R/sampleMap.R new file mode 100644 index 0000000000000000000000000000000000000000..8d7cdc4802a805f4b8337d8885eafba742247429 --- /dev/null +++ b/R/sampleMap.R @@ -0,0 +1,44 @@ +#'Sample Of Observational And Experimental Data For Forecast Verification In Function Of Longitudes And Latitudes +#' +#'This data set provides data in function of longitudes and latitudes for the variable 'tos', i.e. sea surface temperature, over the mediterranean zone from the sample experimental and observational datasets attached to the package.
See examples on how to use Load() for details.\cr\cr +#'The data is provided through a variable named 'sampleMap' and is structured as expected from the 'Load()' function in the 's2dv' package if it was called as follows:\cr\cr +#' \preformatted{ +#'data_path <- system.file('sample_data', package = 's2dv') +#'exp <- list( +#' name = 'experiment', +#' path = file.path(data_path, 'model/$EXP_NAME$/monthly_mean', +#' '$VAR_NAME$_3hourly/$VAR_NAME$_$START_DATES$.nc') +#' ) +#'obs <- list( +#' name = 'observation', +#' path = file.path(data_path, 'observation/$OBS_NAME$/monthly_mean', +#' '$VAR_NAME$/$VAR_NAME$_$YEAR$$MONTH$.nc') +#' ) +#'# Now we are ready to use Load(). +#'startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') +#'sampleData <- Load('tos', list(exp), list(obs), startDates, +#' leadtimemin = 1, leadtimemax = 4, output = 'lonlat', +#' latmin = 27, latmax = 48, lonmin = -12, lonmax = 40) +#' } +#'Check the documentation on 'Load()' in the package 's2dv' for more information. +#' +#'@usage data(sampleMap) +#'@format +#'The data set provides with a variable named 'sampleMap'.\cr\cr +#' +#'sampleMap$mod is an array that contains the experimental data and the dimension meanings and values are:\cr +#' c(# of experimental datasets, # of members, # of starting dates, # of lead-times, # of latitudes, # of longitudes)\cr +#' c(1, 3, 5, 60, 2, 3)\cr\cr +#' +#'sampleMap$obs is an array that contains the observational data and the dimension meanings and values are:\cr +#' c(# of observational datasets, # of members, # of starting dates, # of lead-times, # of latitudes, # of longitudes)\cr +#' c(1, 1, 5, 60, 2, 3)\cr\cr +#' +#' sampleMap$lat is an array with the 2 latitudes covered by the data (see examples on Load() for details on why such low resolution).\cr\cr +#' +#' sampleMap$lon is an array with the 3 longitudes covered by the data (see examples on Load() for details on why such low resolution).
+#' +#' @name sampleMap +#' @docType data +sampleMap <- function(){} + diff --git a/R/sampleTimeSeries.R b/R/sampleTimeSeries.R new file mode 100644 index 0000000000000000000000000000000000000000..fe8e15238b31101b8c7ede08850033e6a4663b8d --- /dev/null +++ b/R/sampleTimeSeries.R @@ -0,0 +1,46 @@ +#'Sample Of Observational And Experimental Data For Forecast Verification As Area Averages +#' +#'This data set provides area averaged data for the variable 'tos', i.e. sea +#'surface temperature, over the mediterranean zone from the example datasets +#'attached to the package. See examples on Load() for more details.\cr\cr +#'The data is provided through a variable named 'sampleTimeSeries' and is +#'structured as expected from the 'Load()' function in the 's2dv' +#'package if it was called as follows:\cr\cr +#' \preformatted{ +#'data_path <- system.file('sample_data', package = 's2dv') +#'exp <- list( +#' name = 'experiment', +#' path = file.path(data_path, 'model/$EXP_NAME$/monthly_mean', +#' '$VAR_NAME$_3hourly/$VAR_NAME$_$START_DATES$.nc') +#' ) +#'obs <- list( +#' name = 'observation', +#' path = file.path(data_path, 'observation/$OBS_NAME$/monthly_mean', +#' '$VAR_NAME$/$VAR_NAME$_$YEAR$$MONTH$.nc') +#' ) +#'# Now we are ready to use Load(). +#'startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') +#'sampleData <- Load('tos', list(exp), list(obs), startDates, +#' output = 'areave', latmin = 27, latmax = 48, lonmin = -12, +#' lonmax = 40) +#' } +#'Check the documentation on 'Load()' in the package 's2dv' for more information.
+#' +#'@usage data(sampleTimeSeries) +#'@format The data set provides with a variable named 'sampleTimeSeries'.\cr\cr +#' +#'sampleTimeSeries$mod is an array that contains the experimental data and the dimension meanings and values are:\cr +#' c(# of experimental datasets, # of members, # of starting dates, # of lead-times)\cr +#' c(1, 3, 5, 60)\cr\cr +#' +#'sampleTimeSeries$obs is an array that contains the observational data and the dimension meanings and values are:\cr +#' c(# of observational datasets, # of members, # of starting dates, # of lead-times)\cr +#' c(1, 1, 5, 60)\cr\cr +#' +#'sampleTimeSeries$lat is an array with the 2 latitudes covered by the data that was area averaged to calculate the time series (see examples on Load() for details on why such low resolution).\cr\cr +#' +#'sampleTimeSeries$lon is an array with the 3 longitudes covered by the data that was area averaged to calculate the time series (see examples on Load() for details on why such low resolution). +#' +#' @name sampleTimeSeries +#' @docType data +sampleTimeSeries <- function(){} diff --git a/README.md b/README.md index 66b0b5dfa84cc7cf364a85f12d38d0c9f13423b9..7eb9a340daa0bbb089307c4ad4d10f1ec37075eb 100644 --- a/README.md +++ b/README.md @@ -12,10 +12,14 @@ package 'multiApply'. Therefore, it can use multi-core for computation and work with multi-dimensional arrays with a higher level of flexibility. Find more information about its previous package s2dverification on GitLab - or on the + or on the CRAN website at . +A review of s2dverification package was published in the Environmental Modelling & Software journal. + +> Manubens, N., L.-P. Caron, A. Hunter, O. Bellprat, E. Exarchou, N.S. Fučkar, J. Garcia-Serrano, F. Massonnet, M. Ménégoz, V. Sicardi, L. Batté, C. Prodhomme, V. Torralba, N. Cortesi, O. Mula-Valls, K. Serradell, V. Guemas, F.J. Doblas-Reyes (2018). An R Package for Climate Forecast Verification. 
Environmental Modelling & Software, 103, 29-42, doi:10.1016/j.envsoft.2018.01.018 + Installation ------------ diff --git a/data/sampleDepthData.RData b/data/sampleDepthData.RData new file mode 100644 index 0000000000000000000000000000000000000000..f3cb8135154bd7ec725a0ad76e9c028e474cf8a3 Binary files /dev/null and b/data/sampleDepthData.RData differ diff --git a/data/sampleMap.RData b/data/sampleMap.RData new file mode 100644 index 0000000000000000000000000000000000000000..259a48c0b3cf18e8a415872a3ab0d31d4c66f1b5 Binary files /dev/null and b/data/sampleMap.RData differ diff --git a/data/sampleTimeSeries.RData b/data/sampleTimeSeries.RData new file mode 100644 index 0000000000000000000000000000000000000000..2adcfcc6a1097020c3e3b0c86c5438ef536e786e Binary files /dev/null and b/data/sampleTimeSeries.RData differ diff --git a/man/AnimateMap.Rd b/man/AnimateMap.Rd index 96074b01680e82364e133050b4853fbf54e374ee..d2003ee4a385fb495d3571354a55e8623af56946 100644 --- a/man/AnimateMap.Rd +++ b/man/AnimateMap.Rd @@ -129,7 +129,7 @@ Examples of input: \examples{ # See ?Load for explanations on the first part of this example \dontrun{ -data_path <- system.file('sample_data', package = 's2dverification') +data_path <- system.file('sample_data', package = 's2dv') expA <- list(name = 'experiment', path = file.path(data_path, 'model/$EXP_NAME$/$STORE_FREQ$_mean/$VAR_NAME$_3hourly', '$VAR_NAME$_$START_DATE$.nc')) @@ -145,11 +145,11 @@ sampleData <- Load('tos', list(expA), list(obsX), startDates, } \dontshow{ startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') -sampleData <- s2dverification:::.LoadSampleData('tos', c('experiment'), - c('observation'), startDates, - output = 'lonlat', - latmin = 27, latmax = 48, - lonmin = -12, lonmax = 40) +sampleData <- s2dv:::.LoadSampleData('tos', c('experiment'), + c('observation'), startDates, + output = 'lonlat', + latmin = 27, latmax = 48, + lonmin = -12, lonmax = 40) } clim <- Clim(sampleData$mod, sampleData$obs, memb = 
FALSE) \dontrun{ @@ -159,39 +159,7 @@ AnimateMap(clim$clim_exp, sampleData$lon, sampleData$lat, msk95lev = FALSE, filled.continents = TRUE, intlon = 10, intlat = 10, fileout = 'clim_dec.gif') } -ano_exp <- Ano(sampleData$mod, clim$clim_exp) -ano_obs <- Ano(sampleData$obs, clim$clim_obs) -leadtimes_dimension <- 4 -initial_month <- 11 -mean_start_month <- 1 -mean_stop_month <- 12 -season_means_mod <- Season(ano_exp, leadtimes_dimension, initial_month, - mean_start_month, mean_stop_month) -season_means_obs <- Season(ano_obs, leadtimes_dimension, initial_month, - mean_start_month, mean_stop_month) - \dontrun{ -AnimateMap(Mean1Dim(season_means_mod, 2)[1, 1, , , ], sampleData$lon, - sampleData$lat, toptitle = "Annual anomalies 1985 of decadal prediction", - sizetit = 1, units = "degree", monini = 1, freq = 1, msk95lev = FALSE, - brks = seq(-0.5, 0.5, 0.1), intlon = 10, intlat = 10, - filled.continents = TRUE, fileout = 'annual_means_dec.gif') - } -dim_to_mean <- 2 # Mean along members -rms <- RMS(Mean1Dim(season_means_mod, dim_to_mean), - Mean1Dim(season_means_obs, dim_to_mean)) - \donttest{ -AnimateMap(rms, sampleData$lon, sampleData$lat, toptitle = - "RMSE decadal prediction", sizetit = 1, units = "degree", - monini = 1, freq = 1, msk95lev = FALSE, brks = seq(0, 0.8, 0.08), - intlon = 10, intlat = 10, filled.continents = TRUE, - fileout = 'rmse_dec.gif') - } -} -\author{ -History:\cr - 1.0 - 2012-04 (V. Guemas, \email{virginie.guemas@bsc.es}) - Original code\cr - 1.1 - 2014-04 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Formatting to CRAN\cr - 1.2 - 2015-05 (V. 
Guemas, \email{virginie.guemas@bsc.es}) - Use of PlotEquiMap and PlotStereoMap +# More examples in s2dverification but are deleted for now + } -\keyword{dynamic} diff --git a/man/CDORemap.Rd b/man/CDORemap.Rd deleted file mode 100644 index b2d5eaa318af16631bb7c813eccd2a635d9f8395..0000000000000000000000000000000000000000 --- a/man/CDORemap.Rd +++ /dev/null @@ -1,229 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/CDORemap.R -\name{CDORemap} -\alias{CDORemap} -\title{Interpolates arrays with longitude and latitude dimensions using CDO} -\usage{ -CDORemap(data_array = NULL, lons, lats, grid, method, avoid_writes = TRUE, - crop = TRUE, force_remap = FALSE, write_dir = tempdir()) -} -\arguments{ -\item{data_array}{Multidimensional numeric array to be interpolated. If -provided, it must have at least a longitude and a latitude dimensions, -identified by the array dimension names. The names for these dimensions -must be one of the recognized by s2dverification (can be checked with -\code{s2dverification:::.KnownLonNames()} and -\code{s2dverification:::.KnownLatNames()}).} - -\item{lons}{Numeric vector or array of longitudes of the centers of the grid -cells. Its size must match the size of the longitude/latitude dimensions -of the input array.} - -\item{lats}{Numeric vector or array of latitudes of the centers of the grid -cells. Its size must match the size of the longitude/latitude dimensions -of the input array.} - -\item{grid}{Character string specifying either a name of a target grid -(recognized by CDO; e.g.: 'r256x128', 't106grid') or a path to another -NetCDF file which to read the target grid from (a single grid must be -defined in such file).} - -\item{method}{Character string specifying an interpolation method -(recognized by CDO; e.g.: 'con', 'bil', 'bic', 'dis'). 
The following -long names are also supported: 'conservative', 'bilinear', 'bicubic' and -'distance-weighted'.} - -\item{avoid_writes}{The step of permutation is needed when the input array -has more than 3 dimensions and none of the longitude or latitude dimensions - in the right-most position (CDO would not accept it without permuting -previously). This step, executed by default when needed, can be avoided -for the price of writing more intermediate files (whis usually is -unconvenient) by setting the parameter \code{avoid_writes = TRUE}.} - -\item{crop}{Whether to crop the data after interpolation with -'cdo sellonlatbox' (TRUE) or to extend interpolated data to the whole -world as CDO does by default (FALSE). If \code{crop = TRUE} then the -longitude and latitude borders which to crop at are taken as the limits of -the cells at the borders ('lons' and 'lats' are perceived as cell centers), -i.e. the resulting array will contain data that covers the same area as -the input array. This is equivalent to specifying \code{crop = 'preserve'}, -i.e. preserving area. If \code{crop = 'tight'} then the borders which to -crop at are taken as the minimum and maximum cell centers in 'lons' and -'lats', i.e. the area covered by the resulting array may be smaller if -interpolating from a coarse grid to a fine grid. The parameter 'crop' also -accepts a numeric vector of custom borders which to crop at: -c(western border, eastern border, southern border, northern border).} - -\item{force_remap}{Whether to force remapping, even if the input data array -is already on the target grid.} - -\item{write_dir}{Path to the directory where to create the intermediate -files for CDO to work. 
By default, the R session temporary directory is -used (\code{tempdir()}).} -} -\value{ -A list with the following components: - \item{'data_array'}{The interpolated data array (if an input array - is provided at all, NULL otherwise).} - \item{'lons'}{The longitudes of the data on the destination grid.} - \item{'lats'}{The latitudes of the data on the destination grid.} -} -\description{ -This function takes as inputs a multidimensional array (optional), a vector -or matrix of longitudes, a vector or matrix of latitudes, a destination grid -specification, and the name of a method to be used to interpolate (one of -those available in the 'remap' utility in CDO). The interpolated array is -returned (if provided) together with the new longitudes and latitudes.\cr\cr -\code{CDORemap()} permutes by default the dimensions of the input array (if -needed), splits it in chunks (CDO can work with data arrays of up to 4 -dimensions), generates a file with the data of each chunk, interpolates it -with CDO, reads it back into R and merges it into a result array. If no -input array is provided, the longitude and latitude vectors will be -transformed only. If the array is already on the desired destination grid, -no transformation is performed (this behvaiour works only for lonlat and -gaussian grids). \cr\cr -Any metadata attached to the input data array, longitudes or latitudes will -be preserved or accordingly modified. -} -\examples{ - \dontrun{ -# Interpolating only vectors of longitudes and latitudes -lon <- seq(0, 360 - 360/50, length.out = 50) -lat <- seq(-90, 90, length.out = 25) -tas2 <- CDORemap(NULL, lon, lat, 't170grid', 'bil', TRUE) - -# Minimal array interpolation -tas <- array(1:50, dim = c(25, 50)) -names(dim(tas)) <- c('lat', 'lon') -lon <- seq(0, 360 - 360/50, length.out = 50) -lat <- seq(-90, 90, length.out = 25) -tas2 <- CDORemap(tas, lon, lat, 't170grid', 'bil', TRUE) - -# Metadata can be attached to the inputs. 
It will be preserved and -# accordignly modified. -tas <- array(1:50, dim = c(25, 50)) -names(dim(tas)) <- c('lat', 'lon') -lon <- seq(0, 360 - 360/50, length.out = 50) -metadata <- list(lon = list(units = 'degrees_east')) -attr(lon, 'variables') <- metadata -lat <- seq(-90, 90, length.out = 25) -metadata <- list(lat = list(units = 'degrees_north')) -attr(lat, 'variables') <- metadata -metadata <- list(tas = list(dim = list(lat = list(len = 25, - vals = lat), - lon = list(len = 50, - vals = lon) - ))) -attr(tas, 'variables') <- metadata -tas2 <- CDORemap(tas, lon, lat, 't170grid', 'bil', TRUE) - -# Arrays of any number of dimensions in any order can be provided. -num_lats <- 25 -num_lons <- 50 -tas <- array(1:(10*num_lats*10*num_lons*10), - dim = c(10, num_lats, 10, num_lons, 10)) -names(dim(tas)) <- c('a', 'lat', 'b', 'lon', 'c') -lon <- seq(0, 360 - 360/num_lons, length.out = num_lons) -metadata <- list(lon = list(units = 'degrees_east')) -attr(lon, 'variables') <- metadata -lat <- seq(-90, 90, length.out = num_lats) -metadata <- list(lat = list(units = 'degrees_north')) -attr(lat, 'variables') <- metadata -metadata <- list(tas = list(dim = list(a = list(), - lat = list(len = num_lats, - vals = lat), - b = list(), - lon = list(len = num_lons, - vals = lon), - c = list() - ))) -attr(tas, 'variables') <- metadata -tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', TRUE) -# The step of permutation can be avoided but more intermediate file writes -# will be performed. -tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', FALSE) - -# If the provided array has the longitude or latitude dimension in the -# right-most position, the same number of file writes will be performed, -# even if avoid_wrties = FALSE. 
-num_lats <- 25 -num_lons <- 50 -tas <- array(1:(10*num_lats*10*num_lons*10), - dim = c(10, num_lats, 10, num_lons)) -names(dim(tas)) <- c('a', 'lat', 'b', 'lon') -lon <- seq(0, 360 - 360/num_lons, length.out = num_lons) -metadata <- list(lon = list(units = 'degrees_east')) -attr(lon, 'variables') <- metadata -lat <- seq(-90, 90, length.out = num_lats) -metadata <- list(lat = list(units = 'degrees_north')) -attr(lat, 'variables') <- metadata -metadata <- list(tas = list(dim = list(a = list(), - lat = list(len = num_lats, - vals = lat), - b = list(), - lon = list(len = num_lons, - vals = lon) - ))) -attr(tas, 'variables') <- metadata -tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', TRUE) -tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', FALSE) - -# An example of an interpolation from and onto a rectangular regular grid -num_lats <- 25 -num_lons <- 50 -tas <- array(1:(1*num_lats*num_lons), dim = c(num_lats, num_lons)) -names(dim(tas)) <- c('y', 'x') -lon <- array(seq(0, 360 - 360/num_lons, length.out = num_lons), - dim = c(num_lons, num_lats)) -metadata <- list(lon = list(units = 'degrees_east')) -names(dim(lon)) <- c('x', 'y') -attr(lon, 'variables') <- metadata -lat <- t(array(seq(-90, 90, length.out = num_lats), - dim = c(num_lats, num_lons))) -metadata <- list(lat = list(units = 'degrees_north')) -names(dim(lat)) <- c('x', 'y') -attr(lat, 'variables') <- metadata -tas2 <- CDORemap(tas, lon, lat, 'r100x50', 'bil') - -# An example of an interpolation from an irregular grid onto a gaussian grid -num_lats <- 25 -num_lons <- 50 -tas <- array(1:(10*num_lats*10*num_lons*10), - dim = c(10, num_lats, 10, num_lons)) -names(dim(tas)) <- c('a', 'j', 'b', 'i') -lon <- array(seq(0, 360 - 360/num_lons, length.out = num_lons), - dim = c(num_lons, num_lats)) -metadata <- list(lon = list(units = 'degrees_east')) -names(dim(lon)) <- c('i', 'j') -attr(lon, 'variables') <- metadata -lat <- t(array(seq(-90, 90, length.out = num_lats), - dim = c(num_lats, num_lons))) -metadata <- 
list(lat = list(units = 'degrees_north')) -names(dim(lat)) <- c('i', 'j') -attr(lat, 'variables') <- metadata -tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil') - -# Again, the dimensions can be in any order -num_lats <- 25 -num_lons <- 50 -tas <- array(1:(10*num_lats*10*num_lons), - dim = c(10, num_lats, 10, num_lons)) -names(dim(tas)) <- c('a', 'j', 'b', 'i') -lon <- array(seq(0, 360 - 360/num_lons, length.out = num_lons), - dim = c(num_lons, num_lats)) -names(dim(lon)) <- c('i', 'j') -lat <- t(array(seq(-90, 90, length.out = num_lats), - dim = c(num_lats, num_lons))) -names(dim(lat)) <- c('i', 'j') -tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil') -tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', FALSE) -# It is ossible to specify an external NetCDF file as target grid reference -tas2 <- CDORemap(tas, lon, lat, 'external_file.nc', 'bil') -} -} -\author{ -History:\cr - 0.0 - 2017-01 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Original code. -} -\keyword{datagen} - diff --git a/man/Clim.Rd b/man/Clim.Rd index b17b2ee50131b168664fb80e7f6fc8c8271edd1b..8bb93f1e07c0c91073a4463b1b4bef42c398910e 100644 --- a/man/Clim.Rd +++ b/man/Clim.Rd @@ -74,7 +74,7 @@ the 'exp' and 'obs' are excluded when computing the climatologies. # Load sample data as in Load() example: example(Load) clim <- Clim(sampleData$mod, sampleData$obs) -clim2 <- Clim(sampleData$mod, sampleData$obs, method = 'kharin', memb = F) +clim2 <- Clim(sampleData$mod, sampleData$obs, method = 'kharin', memb = FALSE) \donttest{ PlotClim(clim$clim_exp, clim$clim_obs, toptitle = paste('sea surface temperature climatologies'), @@ -82,11 +82,4 @@ PlotClim(clim$clim_exp, clim$clim_obs, listobs = c('ERSST'), biglab = FALSE, fileout = 'tos_clim.eps') } } -\author{ -History:\cr - 0.9 - 2011-03 (V. Guemas, \email{virginie.guemas@ic3.cat}) - Original code\cr - 1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to R CRAN - 3.0 - 2020-01 (A. 
Ho, \email{an.ho@bsc.es}) - Adopt multiApply feature -} -\keyword{datagen} diff --git a/man/ColorBar.Rd b/man/ColorBar.Rd index 71da02be33f60396c2c0338f4df3634b6fe6f5cd..1287b70ecba354d01441fb103b497e969a177bf9 100644 --- a/man/ColorBar.Rd +++ b/man/ColorBar.Rd @@ -175,14 +175,4 @@ cols <- c("dodgerblue4", "dodgerblue1", "forestgreen", "yellowgreen", "white", lims <- seq(-1, 1, 0.2) ColorBar(lims, cols) } -\author{ -History:\cr - 0.1 - 2012-04 (V. Guemas, \email{virginie.guemas@bsc.es}) - Original code\cr - 0.2 - 2013-04 (I. Andreu-Burillo, \email{isabel.andreu-burillo@bsc.es}) - Vert option\cr - 1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Formatting to CRAN\cr - 1.1 - 2013-09 (C. Prodhomme, \email{chloe.prodhomme@bsc.es}) - Add cex option\cr - 1.2 - 2016-08 (N. Manubens, \email{nicolau.manubens@bsc.es}) - New ColorBar\cr - (V. Torralba, \email{veronica.torralba@bsc.es}) -} -\keyword{hplot} diff --git a/man/ConfigApplyMatchingEntries.Rd b/man/ConfigApplyMatchingEntries.Rd new file mode 100644 index 0000000000000000000000000000000000000000..5f0efb100dee5886b69850262cdcb54203bc77d3 --- /dev/null +++ b/man/ConfigApplyMatchingEntries.Rd @@ -0,0 +1,71 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ConfigApplyMatchingEntries.R +\name{ConfigApplyMatchingEntries} +\alias{ConfigApplyMatchingEntries} +\title{Apply Matching Entries To Dataset Name And Variable Name To Find Related Info} +\usage{ +ConfigApplyMatchingEntries(configuration, var, exp = NULL, obs = NULL, + show_entries = FALSE, show_result = TRUE) +} +\arguments{ +\item{configuration}{Configuration object obtained from ConfigFileOpen() +or ConfigFileCreate().} + +\item{var}{Name of the variable to load. Will be interpreted as a string, +regular expressions do not apply here. +Examples: 'tas' or 'tasmax_q90'.} + +\item{exp}{Set of experimental dataset identifiers. Will be interpreted as +strings, regular expressions do not apply here. 
Can be NULL (not to +check in experimental dataset tables), and takes by default NULL. +Examples: c('EnsEcmwfSeas', 'EnsUkmoSeas'), c('i00k').} + +\item{obs}{Set of observational dataset identifiers. Will be interpreted as +strings, regular expressions do not apply here. Can be NULL (not to +check in observational dataset tables), and takes by default NULL. +Examples: c('GLORYS', 'ERAint'), c('NCEP').} + +\item{show_entries}{Flag to stipulate whether to show the found matching +entries for all datasets and variable name.} + +\item{show_result}{Flag to stipulate whether to show the result of applying +all the matching entries (dataset main path, file path, ...).} +} +\value{ +A list with the information resulting from applying the matching + entries is returned. +} +\description{ +Given a pair of dataset name and variable name, this function determines +and applies all the matching entries found in the corresponding configuration +table to work out the dataset main path, file path, actual name of variable +inside NetCDF files, ... +} +\examples{ +# Create an empty configuration file +config_file <- paste0(tempdir(), "/example.conf") +s2dv::ConfigFileCreate(config_file, confirm = FALSE) +# Open it into a configuration object +configuration <- ConfigFileOpen(config_file) +# Add an entry at the bottom of 4th level of file-per-startdate experiments +# table which will associate the experiment "ExampleExperiment2" and variable +# "ExampleVariable" to some information about its location. +configuration <- ConfigAddEntry(configuration, "experiments", + "last", "ExampleExperiment2", "ExampleVariable", + "/path/to/ExampleExperiment2/", + "ExampleVariable/ExampleVariable_$START_DATE$.nc") +# Edit entry to generalize for any variable. Changing variable needs . 
+configuration <- ConfigEditEntry(configuration, "experiments", 1, + var_name = ".*", + file_path = "$VAR_NAME$/$VAR_NAME$_$START_DATE$.nc") +# Now apply matching entries for variable and experiment name and show the +# result +match_info <- ConfigApplyMatchingEntries(configuration, 'tas', + exp = c('ExampleExperiment2'), show_result = TRUE) +} +\seealso{ +ConfigApplyMatchingEntries, ConfigEditDefinition, + ConfigEditEntry, ConfigFileOpen, ConfigShowSimilarEntries, + ConfigShowTable +} + diff --git a/man/ConfigEditDefinition.Rd b/man/ConfigEditDefinition.Rd new file mode 100644 index 0000000000000000000000000000000000000000..8e1e968c8e842c471c2cc37dd44fd5284858e6c7 --- /dev/null +++ b/man/ConfigEditDefinition.Rd @@ -0,0 +1,60 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ConfigEditDefinition.R +\name{ConfigEditDefinition} +\alias{ConfigEditDefinition} +\alias{ConfigRemoveDefinition} +\title{Add Modify Or Remove Variable Definitions In Configuration} +\usage{ +ConfigEditDefinition(configuration, name, value, confirm = TRUE) + +ConfigRemoveDefinition(configuration, name) +} +\arguments{ +\item{configuration}{Configuration object obtained with ConfigFileOpen() or +ConfigFileCreate().} + +\item{name}{Name of the variable to add/modify/remove.} + +\item{value}{Value to associate to the variable.} + +\item{confirm}{Flag to stipulate whether to ask for confirmation if the +variable is being modified. Takes by default TRUE.} +} +\value{ +A modified configuration object is returned. +} +\description{ +These functions help in adding, modifying or removing variable definitions +in a configuration object obtained with \code{\link{ConfigFileOpen}} or +\code{\link{ConfigFileCreate}}. ConfigEditDefinition() will add the +definition if not existing. 
+} +\examples{ +# Create an empty configuration file +config_file <- paste0(tempdir(), "/example.conf") +ConfigFileCreate(config_file, confirm = FALSE) +# Open it into a configuration object +configuration <- ConfigFileOpen(config_file) +# Add an entry at the bottom of 4th level of file-per-startdate experiments +# table which will associate the experiment "ExampleExperiment2" and variable +# "ExampleVariable" to some information about its location. +configuration <- ConfigAddEntry(configuration, "experiments", + "last", "ExampleExperiment2", "ExampleVariable", + "/path/to/ExampleExperiment2/", + "ExampleVariable/ExampleVariable_$START_DATE$.nc") +# Edit entry to generalize for any variable. Changing variable needs . +configuration <- ConfigEditEntry(configuration, "experiments", 1, + var_name = ".*", + file_path = "$VAR_NAME$/$VAR_NAME$_$START_DATE$.nc") +# Now apply matching entries for variable and experiment name and show the +# result +match_info <- ConfigApplyMatchingEntries(configuration, 'tas', + exp = c('ExampleExperiment2'), show_result = TRUE) + +} +\seealso{ +[ConfigApplyMatchingEntries()], [ConfigEditDefinition()], + [ConfigEditEntry()], [ConfigFileOpen()], [ConfigShowSimilarEntries()], + [ConfigShowTable()]. 
+} + diff --git a/man/ConfigEditEntry.Rd b/man/ConfigEditEntry.Rd new file mode 100644 index 0000000000000000000000000000000000000000..9abf3e522071ece12dec0db31e72c22c456a8c73 --- /dev/null +++ b/man/ConfigEditEntry.Rd @@ -0,0 +1,102 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ConfigEditEntry.R +\name{ConfigEditEntry} +\alias{ConfigAddEntry} +\alias{ConfigEditEntry} +\alias{ConfigRemoveEntry} +\title{Add, Remove Or Edit Entries In The Configuration} +\usage{ +ConfigEditEntry(configuration, dataset_type, position, dataset_name = NULL, + var_name = NULL, main_path = NULL, file_path = NULL, + nc_var_name = NULL, suffix = NULL, varmin = NULL, varmax = NULL) + +ConfigAddEntry(configuration, dataset_type, position = "last", + dataset_name = ".*", var_name = ".*", main_path = "*", + file_path = "*", nc_var_name = "*", suffix = "*", varmin = "*", + varmax = "*") + +ConfigRemoveEntry(configuration, dataset_type, dataset_name = NULL, + var_name = NULL, position = NULL) +} +\arguments{ +\item{configuration}{Configuration object obtained via ConfigFileOpen() +or ConfigFileCreate() that will be modified accordingly.} + +\item{dataset_type}{Whether to modify a table of experimental datasets or +a table of observational datasets. Can take values 'experiments' or +'observations' respectively.} + +\item{position}{'position' tells the index in the table of the entry to +edit or remove. Use ConfigShowTable() to see the index of the entry. +In ConfigAddEntry() it can also take the value "last" (default), that will +put the entry at the end of the corresponding level, or "first" at the +beginning. See ?ConfigFileOpen for more information. 
+If 'dataset_name' and 'var_name' are specified this argument is ignored in +ConfigRemoveEntry().} + +\item{dataset_name, var_name, main_path, file_path, nc_var_name, suffix, varmin, varmax}{These parameters tell the dataset name, variable name, main path, ..., of +the entry to add, edit or remove.\cr 'dataset_name' and 'var_name' can take +as a value a POSIX 1003.2 regular expression (see ?ConfigFileOpen).\cr +Other parameters can take as a value a shell globbing expression +(see ?ConfigFileOpen).\cr +'dataset_name' and 'var_name' take by default the regular expression '.*' +(match any dataset and variable name), and the others take by default '*' +(associate to the pair 'dataset_name' and 'var_name' all the defined +default values. In this case '*' has a special behaviour, it won't be +used as a shell globbing expression. See ?ConfigFileOpen and +?ConfigShowDefinitions).\cr +'var_min' and 'var_max' must be a character string.\cr +To define these values, you can use defined variables via $VARIABLE_NAME$ +or other entry attributes via $ATTRIBUTE_NAME$. See ?ConfigFileOpen for +more information.} +} +\value{ +The function returns an accordingly modified configuration object. + To apply the changes in the configuration file it must be saved using + ConfigFileSave(). +} +\description{ +ConfigAddEntry(), ConfigEditEntry() and ConfigRemoveEntry() are functions +to manage entries in a configuration object created with ConfigFileOpen().\cr +Before adding an entry, make sure the defaults don't do already what you +want (ConfigShowDefinitions(), ConfigShowTable()).\cr +Before adding an entry, make sure it doesn't override and spoil what other +entries do (ConfigShowTable(), ConfigFileOpen()).\cr +Before adding an entry, make sure there aren't other entries that already +do what you want (ConfigShowSimilarEntries()). 
+} +\examples{ +# Create an empty configuration file +config_file <- paste0(tempdir(), "/example.conf") +ConfigFileCreate(config_file, confirm = FALSE) +# Open it into a configuration object +configuration <- ConfigFileOpen(config_file) +# Add an entry at the bottom of 4th level of file-per-startdate experiments +# table which will associate the experiment "ExampleExperiment" and variable +# "ExampleVariable" to some information about its location. +configuration <- ConfigAddEntry(configuration, "experiments", + "last", "ExampleExperiment", "ExampleVariable", + "/path/to/ExampleExperiment/", + "ExampleVariable/ExampleVariable_$START_DATE$.nc") +# Add another entry +configuration <- ConfigAddEntry(configuration, "experiments", + "last", "ExampleExperiment2", "ExampleVariable", + "/path/to/ExampleExperiment2/", + "ExampleVariable/ExampleVariable_$START_DATE$.nc") +# Edit second entry to generalize for any variable. Changing variable needs . +configuration <- ConfigEditEntry(configuration, "experiments", 2, + var_name = ".*", + file_path = "$VAR_NAME$/$VAR_NAME$_$START_DATE$.nc") +# Remove first entry +configuration <- ConfigRemoveEntry(configuration, "experiments", + "ExampleExperiment", "ExampleVariable") +# Show results +ConfigShowTable(configuration, "experiments") +# Save the configuration +ConfigFileSave(configuration, config_file, confirm = FALSE) +} +\seealso{ +ConfigApplyMatchingEntries, ConfigEditDefinition, ConfigEditEntry, + ConfigFileOpen, ConfigShowSimilarEntries, ConfigShowTable +} + diff --git a/man/ConfigFileOpen.Rd b/man/ConfigFileOpen.Rd new file mode 100644 index 0000000000000000000000000000000000000000..cf40e0011faba312b0661906b2ab2fe3a14c427d --- /dev/null +++ b/man/ConfigFileOpen.Rd @@ -0,0 +1,197 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ConfigFileOpen.R +\name{ConfigFileOpen} +\alias{ConfigFileCreate} +\alias{ConfigFileOpen} +\alias{ConfigFileSave} +\title{Functions To Create Open And Save Configuration 
File} +\usage{ +ConfigFileOpen(file_path, silent = FALSE, stop = FALSE) + +ConfigFileCreate(file_path, confirm = TRUE) + +ConfigFileSave(configuration, file_path, confirm = TRUE) +} +\arguments{ +\item{file_path}{Path to the configuration file to create/open/save.} + +\item{silent}{Flag to activate or deactivate verbose mode. +Defaults to FALSE (verbose mode on).} + +\item{stop}{TRUE/FALSE whether to raise an error if not all the mandatory +default variables are defined in the configuration file.} + +\item{confirm}{Flag to stipulate whether to ask for confirmation when +saving a configuration file that already exists.\cr +Defaults to TRUE (confirmation asked).} + +\item{configuration}{Configuration object to save in a file.} +} +\value{ +ConfigFileOpen() returns a configuration object with all the information for + the configuration file mechanism to work.\cr +ConfigFileSave() returns TRUE if the file has been saved and FALSE otherwise.\cr +ConfigFileCreate() returns nothing. +} +\description{ +These functions help in creating, opening and saving configuration files. +} +\details{ +ConfigFileOpen() loads all the data contained in the configuration file +specified as parameter 'file_path'. +Returns a configuration object with the variables needed for the +configuration file mechanism to work. +This function is called from inside the Load() function to load the +configuration file specified in 'configfile'.\cr\cr +ConfigFileCreate() creates an empty configuration file and saves it to +the specified path. It may be opened later with ConfigFileOpen() to be edited. +Some default values are set when creating a file with this function, you +can check these with ConfigShowDefinitions().\cr\cr +ConfigFileSave() saves a configuration object into a file, which may then +be used from Load().\cr\cr +Two examples of configuration files can be found inside the 'inst/config/' +folder in the package: + \itemize{ + \item{BSC.conf: configuration file used at BSC-CNS. 
Contains location + data on several datasets and variables.} + \item{template.conf: very simple configuration file intended to be used as + pattern when starting from scratch.} + } +How the configuration file works:\cr +~~~~~~~~~~~~~~~~~~~~~~~~~~~~\cr +It contains one list and two tables.\cr +Each of these has a header that starts with '!!'. These are key lines and +should not be removed or reordered.\cr +Lines starting with '#' and blank lines will be ignored. +The list should contain variable definitions and default value definitions.\cr +The first table contains information about experiments.\cr +The second table contains information about observations.\cr +Each table entry is a list of comma-separated elements.\cr +The two first are part of a key that is associated to a value formed by the +other elements.\cr +The key elements are a dataset identifier and a variable name.\cr +The value elements are the dataset main path, dataset file path, the +variable name inside the .nc file, a default suffix (explained below) and a +minimum and maximum values beyond which loaded data is deactivated.\cr +Given a dataset name and a variable name, a full path is obtained +concatenating the main path and the file path.\cr +Also the nc variable name, the suffixes and the limit values are obtained.\cr +Any of the elements in the keys can contain regular expressions[1] that will +cause matching for sets of dataset names or variable names.\cr +The dataset path and file path can contain shell globbing expressions[2] +that will cause matching for sets of paths when fetching the file in the +full path.\cr +The full path can point to an OPeNDAP URL.\cr +Any of the elements in the value can contain variables that will be replaced +to an associated string.\cr +Variables can be defined only in the list at the top of the file. 
\cr +The pattern of a variable definition is\cr +VARIABLE_NAME = VARIABLE_VALUE\cr +and can be accessed from within the table values or from within the variable +values as\cr + $VARIABLE_NAME$\cr +For example:\cr + FILE_NAME = tos.nc\cr + !!table of experiments\cr + ecmwf, tos, /path/to/dataset/, $FILE_NAME$\cr +There are some reserved variables that will offer information about the +store frequency, the current startdate Load() is fetching, etc:\cr + $VAR_NAME$, $START_DATE$, $STORE_FREQ$, $MEMBER_NUMBER$\cr + for experiments only: $EXP_NAME$\cr + for observations only: $OBS_NAME$, $YEAR$, $MONTH$, $DAY$\cr +Additionally, from an element in an entry value you can access the other +elements of the entry as:\cr + $EXP_MAIN_PATH$, $EXP_FILE_PATH$, \cr$VAR_NAME$, $SUFFIX$, $VAR_MIN$, $VAR_MAX$\cr +\cr +The variable $SUFFIX$ is useful because it can be used to take part in the +main or file path. For example: '/path/to$SUFFIX$/dataset/'.\cr +It will be replaced by the value in the column that corresponds to the +suffix unless the user specifies a different suffix via the parameter +'suffixexp' or 'suffixobs'.\cr +This way the user is able to load two variables with the same name in the +same dataset but with slight modifications, with a suffix anywhere in the +path to the data that advices of this slight modification.\cr\cr +The entries in a table will be grouped in 4 levels of specificity: + \enumerate{ + \item{ +General entries:\cr +- the key dataset name and variable name are both a regular expression +matching any sequence of characters (.*) that will cause matching for any +pair of dataset and variable names\cr + Example: .*, .*, /dataset/main/path/, file/path, nc_var_name, suffix, +var_min, var_max + } + \item{ +Dataset entries:\cr +- the key variable name matches any sequence of characters\cr + Example: ecmwf, .*, /dataset/main/path/, file/path, nc_var_name, + suffix, var_min, var_max + } + \item{ +Variable entries:\cr +- the key dataset name matches any 
sequence of characters\cr + Example: .*, tos, /dataset/main/path/, file/path, nc_var_name, + suffix, var_min, var_max + } + \item{ + Specific entries:\cr +- both key values are specified\cr + Example: ecmwf, tos, /dataset/main/path/, file/path, nc_var_name, + suffix, var_min, var_max + } + } +Given a pair of dataset name and variable name for which we want to know the +full path, all the rules that match will be applied from more general to +more specific.\cr +If there is more than one entry per group that match a given key pair, +these will be applied in the order of appearance in the configuration file +(top to bottom).\cr\cr +An asterisk (*) in any value element will be interpreted as 'leave it as is +or take the default value if yet not defined'.\cr +The default values are defined in the following reserved variables:\cr + $DEFAULT_EXP_MAIN_PATH$, $DEFAULT_EXP_FILE_PATH$, $DEFAULT_NC_VAR_NAME$, +$DEFAULT_OBS_MAIN_PATH$, $DEFAULT_OBS_FILE_PATH$, $DEFAULT_SUFFIX$, +$DEFAULT_VAR_MIN$, $DEFAULT_VAR_MAX$, \cr +$DEFAULT_DIM_NAME_LATITUDES$, $DEFAULT_DIM_NAME_LONGITUDES$, \cr +$DEFAULT_DIM_NAME_MEMBERS$\cr\cr +Trailing asterisks in an entry are not mandatory. For example\cr + ecmwf, .*, /dataset/main/path/, *, *, *, *, *\cr +will have the same effect as\cr + ecmwf, .*, /dataset/main/path/ \cr\cr +A double quote only (") in any key or value element will be interpreted as +'fill in with the same value as the entry above'. +} +\examples{ +# Create an empty configuration file +config_file <- paste0(tempdir(), "/example.conf") +ConfigFileCreate(config_file, confirm = FALSE) +# Open it into a configuration object +configuration <- ConfigFileOpen(config_file) +# Add an entry at the bottom of 4th level of file-per-startdate experiments +# table which will associate the experiment "ExampleExperiment2" and variable +# "ExampleVariable" to some information about its location. 
+configuration <- ConfigAddEntry(configuration, "experiments", + "last", "ExampleExperiment2", "ExampleVariable", + "/path/to/ExampleExperiment2/", + "ExampleVariable/ExampleVariable_$START_DATE$.nc") +# Edit entry to generalize for any variable. Changing variable needs . +configuration <- ConfigEditEntry(configuration, "experiments", 1, + var_name = ".*", + file_path = "$VAR_NAME$/$VAR_NAME$_$START_DATE$.nc") +# Now apply matching entries for variable and experiment name and show the +# result +match_info <- ConfigApplyMatchingEntries(configuration, 'tas', + exp = c('ExampleExperiment2'), show_result = TRUE) +# Finally save the configuration file. +ConfigFileSave(configuration, config_file, confirm = FALSE) + +} +\references{ +[1] \url{https://stat.ethz.ch/R-manual/R-devel/library/base/html/regex.html}\cr +[2] \url{http://tldp.org/LDP/abs/html/globbingref.html} +} +\seealso{ +ConfigApplyMatchingEntries, ConfigEditDefinition, + ConfigEditEntry, ConfigFileOpen, ConfigShowSimilarEntries, ConfigShowTable +} + diff --git a/man/ConfigShowSimilarEntries.Rd b/man/ConfigShowSimilarEntries.Rd new file mode 100644 index 0000000000000000000000000000000000000000..b9f80ced42f8a8683939346b1f8a9616b1940e7e --- /dev/null +++ b/man/ConfigShowSimilarEntries.Rd @@ -0,0 +1,82 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ConfigShowSimilarEntries.R +\name{ConfigShowSimilarEntries} +\alias{ConfigShowSimilarEntries} +\title{Find Similar Entries In Tables Of Datasets} +\usage{ +ConfigShowSimilarEntries(configuration, dataset_name = NULL, + var_name = NULL, main_path = NULL, file_path = NULL, + nc_var_name = NULL, suffix = NULL, varmin = NULL, varmax = NULL, + n_results = 10) +} +\arguments{ +\item{configuration}{Configuration object obtained either from +ConfigFileCreate() or ConfigFileOpen().} + +\item{dataset_name}{Optional dataset name to look for similars of.} + +\item{var_name}{Optional variable name to look for similars of.} + 
+\item{main_path}{Optional main path to look for similars of.} + +\item{file_path}{Optional file path to look for similars of.} + +\item{nc_var_name}{Optional variable name inside NetCDF file to look for similars of.} + +\item{suffix}{Optional suffix to look for similars of.} + +\item{varmin}{Optional variable minimum to look for similars of.} + +\item{varmax}{Optional variable maximum to look for similars of.} + +\item{n_results}{Top 'n_results' alike results will be shown only. Defaults +to 10 in ConfigShowSimilarEntries() and to 5 in ConfigShowSimilarVars().} +} +\value{ +These functions return information about the found matches. +} +\description{ +These functions help in finding similar entries in tables of supported +datasets by comparing all entries with some given information.\cr +This is useful when dealing with complex configuration files and not sure +if they already support certain variables or datasets.\cr +At least one field must be provided in ConfigShowSimilarEntries(). +Other fields can be unspecified and won't be taken into account. If more +than one field is provided, sameness is averaged over all provided fields +and entries are sorted from higher average to lower. +} +\details{ +Sameness is calculated with string distances as specified by Simon White +in [1]. +} +\examples{ +# Create an empty configuration file +config_file <- paste0(tempdir(), "/example.conf") +ConfigFileCreate(config_file, confirm = FALSE) +# Open it into a configuration object +configuration <- ConfigFileOpen(config_file) +# Add an entry at the bottom of 4th level of file-per-startdate experiments +# table which will associate the experiment "ExampleExperiment2" and variable +# "ExampleVariable" to some information about its location. +configuration <- ConfigAddEntry(configuration, "experiments", "last", + "ExampleExperiment2", "ExampleVariable", + "/path/to/ExampleExperiment2/", + "ExampleVariable/ExampleVariable_$START_DATE$.nc") +# Edit entry to generalize for any variable. 
Changing variable needs . +configuration <- ConfigEditEntry(configuration, "experiments", 1, + var_name = "Var.*", + file_path = "$VAR_NAME$/$VAR_NAME$_$START_DATE$.nc") +# Look for similar entries +ConfigShowSimilarEntries(configuration, dataset_name = "Exper", + var_name = "Vari") + +} +\references{ +[1] Simon White, string sameness: + \url{http://www.catalysoft.com/articles/StrikeAMatch.html} +} +\seealso{ +ConfigApplyMatchingEntries, ConfigEditDefinition, + ConfigEditEntry, ConfigFileOpen, ConfigShowSimilarEntries, ConfigShowTable +} + diff --git a/man/ConfigShowTable.Rd b/man/ConfigShowTable.Rd new file mode 100644 index 0000000000000000000000000000000000000000..7c08053e7e3aa21ca76a3f2ac951e05ddec0b175 --- /dev/null +++ b/man/ConfigShowTable.Rd @@ -0,0 +1,57 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ConfigShowTable.R +\name{ConfigShowTable} +\alias{ConfigShowDefinitions} +\alias{ConfigShowTable} +\title{Show Configuration Tables And Definitions} +\usage{ +ConfigShowTable(configuration, dataset_type, line_numbers = NULL) + +ConfigShowDefinitions(configuration) +} +\arguments{ +\item{configuration}{Configuration object obtained from ConfigFileCreate() +or ConfigFileOpen().} + +\item{dataset_type}{In ConfigShowTable(), 'dataset_type' tells whether the +table to show is of experimental datasets or of observational datasets. +Can take values 'experiments' or 'observations'.} + +\item{line_numbers}{'line_numbers' is an optional vector of numbers as long +as the number of entries in the specified table. Intended for internal use.} +} +\value{ +These functions return nothing. +} +\description{ +These functions show the tables of supported datasets and definitions in a +configuration object obtained via ConfigFileCreate() or ConfigFileOpen(). 
+} +\examples{ +# Create an empty configuration file +config_file <- paste0(tempdir(), "/example.conf") +ConfigFileCreate(config_file, confirm = FALSE) +# Open it into a configuration object +configuration <- ConfigFileOpen(config_file) +# Add an entry at the bottom of 4th level of file-per-startdate experiments +# table which will associate the experiment "ExampleExperiment2" and variable +# "ExampleVariable" to some information about its location. +configuration <- ConfigAddEntry(configuration, "experiments", "last", + "ExampleExperiment2", "ExampleVariable", + "/path/to/ExampleExperiment2/", + "ExampleVariable/ExampleVariable_$START_DATE$.nc") +# Edit entry to generalize for any variable. Changing variable needs . +configuration <- ConfigEditEntry(configuration, "experiments", 1, + var_name = ".*", + file_path = "$VAR_NAME$/$VAR_NAME$_$START_DATE$.nc") +# Show tables, lists and definitions +ConfigShowTable(configuration, 'experiments') +ConfigShowDefinitions(configuration) + +} +\seealso{ +[ConfigApplyMatchingEntries()], [ConfigEditDefinition()], + [ConfigEditEntry()], [ConfigFileOpen()], [ConfigShowSimilarEntries()], + [ConfigShowTable()]. 
+} + diff --git a/man/Corr.Rd b/man/Corr.Rd index c44acac92a9e7330481cdc221a464573558e9ae9..45eb166197dd3fef4ece125678d266915607cfb4 100644 --- a/man/Corr.Rd +++ b/man/Corr.Rd @@ -77,29 +77,8 @@ significance level relies on an one-sided student-T distribution.\cr # Load sample data as in Load() example: example(Load) clim <- Clim(sampleData$mod, sampleData$obs) -ano_exp <- Ano(sampleData$mod, clim$clim_exp) -ano_obs <- Ano(sampleData$obs, clim$clim_obs) -runmean_months <- 12 -dim_to_smooth <- 4 -# Smooth along lead-times -smooth_ano_exp <- Smoothing(ano_exp, runmean_months, dim_to_smooth) -smooth_ano_obs <- Smoothing(ano_obs, runmean_months, dim_to_smooth) -leadtimes_per_startdate <- 60 -corr <- Corr(smooth_ano_exp, - smooth_ano_obs, - comp_dim = 'ftime', #Discard start dates which contain any NA ftime - limits = c(ceiling((runmean_months + 1) / 2), - leadtimes_per_startdate - floor(runmean_months / 2))) +corr <- Corr(clim$clim_exp, clim$clim_obs, time_dim = 'ftime') +# Renew the example when Ano and Smoothing is ready } -\author{ -History:\cr -0.1 - 2011-04 (V. Guemas, \email{vguemas@bsc.es}) - Original code\cr -1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Formatting to R CRAN\cr -1.1 - 2014-10 (M. Menegoz, \email{martin.menegoz@bsc.es}) - Adding conf.lev argument\cr -1.2 - 2015-03 (L.P. Caron, \email{louis-philippe.caron@bsc.es}) - Adding method argument\cr -1.3 - 2017-02 (A. Hunter, \email{alasdair.hunter@bsc.es}) - Adapted to veriApply() -3.0 - 2020-01 (A. Ho, \email{an.ho@bsc.es}) - Adapt multiApply feature -} -\keyword{datagen} diff --git a/man/Eno.Rd b/man/Eno.Rd index 53f2813102a9290601bee9a504765bfd7cca1fdc..32468bdddb048d1cf14c345ef6aedc4e37bb54d8 100644 --- a/man/Eno.Rd +++ b/man/Eno.Rd @@ -39,11 +39,4 @@ data[na] <- NA res <- Eno(data) } -\author{ -History:\cr -0.1 - 2011-05 (V. Guemas, \email{virginie.guemas@ic3.cat}) - Original code\cr -1.0 - 2013-09 (N. 
Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to R CRAN -3.0 - 2019-12 (A. Ho, \email{an.ho@bsc.es}) - Adopt multiApply feature -} -\keyword{datagen} diff --git a/man/InsertDim.Rd b/man/InsertDim.Rd index 1f6aac6622c8187d0e4e50920eb16013682206b1..8ab628d3eb3d99d50c94e0984aade50707b18e6e 100644 --- a/man/InsertDim.Rd +++ b/man/InsertDim.Rd @@ -32,12 +32,4 @@ res <- InsertDim(InsertDim(a, posdim = 2, lendim = 1, name = 'e'), 4, c(f = 2)) dim(res) } -\author{ -History:\cr -0.1 - 2011-03 (V. Guemas, \email{virginie.guemas@ic3.cat}) - Original code\cr -1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to R CRAN\cr -1.1 - 2015-03 (N. Manubens, \email{nicolau.manubens@ic3.cat}) - Improvements -3.0 - 2019-12 (A. Ho, \email{an.ho@bsc.es}) - Modify with multiApply -} -\keyword{datagen} diff --git a/man/LeapYear.Rd b/man/LeapYear.Rd index 12b02b49a48ed2ec974e35b24d94fbd803aa40aa..d261b0aea8d5697c7250114d7254affbca79b26c 100644 --- a/man/LeapYear.Rd +++ b/man/LeapYear.Rd @@ -21,10 +21,4 @@ print(LeapYear(1991)) print(LeapYear(1992)) print(LeapYear(1993)) } -\author{ -History:\cr -0.1 - 2011-03 (V. Guemas, \email{vguemas@ic3.cat}) - Original code\cr -1.0 - 2013-09 (N. 
Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to CRAN -} -\keyword{datagen} diff --git a/man/Load.Rd b/man/Load.Rd index c721e6139f4cbe77b263fe0e626de71feea35997..214f984225c858da01f12ad9a80d8c66c373beff 100644 --- a/man/Load.Rd +++ b/man/Load.Rd @@ -315,7 +315,7 @@ E.g., list(array(1, dim = c(num_lons, num_lats)))} \item{maskobs}{See help on parameter 'maskmod'.} -\item{configfile}{Path to the s2dverification configuration file from which +\item{configfile}{Path to the s2dv configuration file from which to retrieve information on location in file system (and other) of datasets.\cr If not specified, the configuration file used at BSC-ES will be used (it is included in the package).\cr @@ -565,7 +565,7 @@ specified output type is area averaged time series the data is averaged on the individual grid of each dataset but can also be averaged after interpolating into a common grid. See parameters 'grid' and 'method'.\cr Once the two arrays are filled by calling this function, other functions in -the s2dverification package that receive as inputs data formatted in this +the s2dv package that receive as inputs data formatted in this data structure can be executed (e.g: \code{Clim()} to compute climatologies, \code{Ano()} to compute anomalies, ...).\cr\cr Load() has many additional parameters to disable values and trim dimensions @@ -797,7 +797,7 @@ to the package or check the comments in the code. # Example 1: Providing lists of lists to 'exp' and 'obs': # \dontrun{ -data_path <- system.file('sample_data', package = 's2dverification') +data_path <- system.file('sample_data', package = 's2dv') exp <- list( name = 'experiment', path = file.path(data_path, 'model/$EXP_NAME$/monthly_mean', @@ -823,7 +823,7 @@ sampleData <- Load('tos', list(exp), list(obs), startDates, # writing a configuration file). 
# \dontrun{ -data_path <- system.file('sample_data', package = 's2dverification') +data_path <- system.file('sample_data', package = 's2dv') expA <- list(name = 'experiment', path = file.path(data_path, 'model/$EXP_NAME$/$STORE_FREQ$_mean/$VAR_NAME$_3hourly', '$VAR_NAME$_$START_DATE$.nc')) @@ -837,7 +837,7 @@ sampleData <- Load('tos', list(expA), list(obsX), startDates, output = 'areave', latmin = 27, latmax = 48, lonmin = -12, lonmax = 40) # -# Example 2: providing character strings in 'exp' and 'obs', and providing +# Example 3: providing character strings in 'exp' and 'obs', and providing # a configuration file. # The configuration file 'sample.conf' that we will create in the example # has the proper entries to load these (see ?LoadConfigFile for details on @@ -848,7 +848,7 @@ ConfigFileCreate(configfile, confirm = FALSE) c <- ConfigFileOpen(configfile) c <- ConfigEditDefinition(c, 'DEFAULT_VAR_MIN', '-1e19', confirm = FALSE) c <- ConfigEditDefinition(c, 'DEFAULT_VAR_MAX', '1e19', confirm = FALSE) -data_path <- system.file('sample_data', package = 's2dverification') +data_path <- system.file('sample_data', package = 's2dv') exp_data_path <- paste0(data_path, '/model/$EXP_NAME$/') obs_data_path <- paste0(data_path, '/$OBS_NAME$/') c <- ConfigAddEntry(c, 'experiments', dataset_name = 'experiment', @@ -867,20 +867,11 @@ sampleData <- Load('tos', c('experiment'), c('observation'), startDates, } \dontshow{ startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') -sampleData <- s2dverification:::.LoadSampleData('tos', c('experiment'), - c('observation'), startDates, - output = 'areave', - latmin = 27, latmax = 48, - lonmin = -12, lonmax = 40) +sampleData <- s2dv:::.LoadSampleData('tos', c('experiment'), + c('observation'), startDates, + output = 'areave', + latmin = 27, latmax = 48, + lonmin = -12, lonmax = 40) } } -\author{ -History:\cr -0.1 - 2011-03 (V. Guemas, \email{virginie.guemas@bsc.es}) - Original code\cr -1.0 - 2013-09 (N. 
Manubens, \email{nicolau.manubens@bsc.es}) - Formatting to CRAN\cr -1.2 - 2015-02 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Generalisation + parallelisation\cr -1.3 - 2015-07 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Improvements related to configuration file mechanism\cr -1.4 - 2016-01 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Added subsetting capabilities -} -\keyword{datagen} diff --git a/man/MeanDims.Rd b/man/MeanDims.Rd index f1c05bd99571bf34f6ffce0daa0a4cc311cf6892..f2140f31fd9bb2476c97f4348f42c3bf6780affd 100644 --- a/man/MeanDims.Rd +++ b/man/MeanDims.Rd @@ -30,12 +30,4 @@ print(dim(MeanDims(a, 2))) print(dim(MeanDims(a, c(2, 3)))) print(dim(MeanDims(a, c('a', 'b')))) } -\author{ -History:\cr -0.1 - 2011-04 (V. Guemas, \email{vguemas@ic3.cat}) - Original code\cr -1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to R CRAN\cr -1.1 - 2015-03 (N. Manubens, \email{nicolau.manubens@ic3.cat}) - Improved memory usage -3.0 - 2020-01 (A. Ho, \email{an.ho@bsc.es}) - Preserve dimension names -} -\keyword{datagen} diff --git a/man/Plot2VarsVsLTime.Rd b/man/Plot2VarsVsLTime.Rd deleted file mode 100644 index 8ba44e4ee8838e58042d9bb6d0fa39cc306433e1..0000000000000000000000000000000000000000 --- a/man/Plot2VarsVsLTime.Rd +++ /dev/null @@ -1,123 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/Plot2VarsVsLTime.R -\name{Plot2VarsVsLTime} -\alias{Plot2VarsVsLTime} -\title{Plot Two Scores With Confidence Intervals In A Common Plot} -\usage{ -Plot2VarsVsLTime(var1, var2, toptitle = "", ytitle = "", monini = 1, - freq = 12, nticks = NULL, limits = NULL, listexp = c("exp1", "exp2", - "exp3"), listvars = c("var1", "var2"), biglab = FALSE, hlines = NULL, - leg = TRUE, siglev = FALSE, sizetit = 1, show_conf = TRUE, - fileout = "output_plot2varsvsltime.eps", width = 8, height = 5, - size_units = "in", res = 100, ...) 
-} -\arguments{ -\item{var1}{Matrix of dimensions (nexp/nmod, nltime).} - -\item{var2}{Matrix of dimensions (nexp/nmod, nltime).} - -\item{toptitle}{Main title, optional.} - -\item{ytitle}{Title of Y-axis, optional.} - -\item{monini}{Starting month between 1 and 12. Default = 1.} - -\item{freq}{1 = yearly, 12 = monthly, 4 = seasonal, ... Default = 12.} - -\item{nticks}{Number of ticks and labels on the x-axis, optional.} - -\item{limits}{c(lower limit, upper limit): limits of the Y-axis, optional.} - -\item{listexp}{List of experiment names, up to three, optional.} - -\item{listvars}{List of names of input variables, optional.} - -\item{biglab}{TRUE/FALSE for presentation/paper plot. Default = FALSE.} - -\item{hlines}{c(a, b, ...) Add horizontal black lines at Y-positions a, b, -...\cr -Default: NULL.} - -\item{leg}{TRUE/FALSE if legend should be added or not to the plot. -Default = TRUE.} - -\item{siglev}{TRUE/FALSE if significance level should replace confidence -interval.\cr -Default = FALSE.} - -\item{sizetit}{Multiplicative factor to change title size, optional.} - -\item{show_conf}{TRUE/FALSE to show/not confidence intervals for input -variables.} - -\item{fileout}{Name of output file. Extensions allowed: eps/ps, jpeg, png, -pdf, bmp and tiff. \cr -Default = 'output_plot2varsvsltime.eps'} - -\item{width}{File width, in the units specified in the parameter size_units -(inches by default). Takes 8 by default.} - -\item{height}{File height, in the units specified in the parameter -size_units (inches by default). Takes 5 by default.} - -\item{size_units}{Units of the size of the device (file or window) to plot -in. Inches ('in') by default. See ?Devices and the creator function of the -corresponding device.} - -\item{res}{Resolution of the device (file or window) to plot in. See -?Devices and the creator function of the corresponding device.} - -\item{...}{Arguments to be passed to the method. 
Only accepts the following -graphical parameters:\cr -adj ann ask bg bty cex.sub cin col.axis col.lab col.main col.sub cra crt -csi cxy err family fg fig font font.axis font.lab font.main font.sub lend -lheight ljoin lmitre mar mex mfcol mfrow mfg mkh oma omd omi page pch plt -smo srt tck tcl usr xaxp xaxs xaxt xlog xpd yaxp yaxs yaxt ylbias ylog \cr -For more information about the parameters see `par`.} -} -\description{ -Plots two input variables having the same dimensions in a common plot.\cr -One plot for all experiments.\cr -Input variables should have dimensions (nexp/nmod, nltime). -} -\details{ -Examples of input:\cr -------------------\cr\cr -RMSE error for a number of experiments and along lead-time: (nexp, nltime) -} -\examples{ -# Load sample data as in Load() example: -example(Load) -clim <- Clim(sampleData$mod, sampleData$obs) -ano_exp <- Ano(sampleData$mod, clim$clim_exp) -ano_obs <- Ano(sampleData$obs, clim$clim_obs) -runmean_months <- 12 -dim_to_smooth <- 4 # Smooth along lead-times -smooth_ano_exp <- Smoothing(ano_exp, runmean_months, dim_to_smooth) -smooth_ano_obs <- Smoothing(ano_obs, runmean_months, dim_to_smooth) -dim_to_mean <- 2 # Mean along members -required_complete_row <- 3 # Discard start dates that contain NA along lead-times -leadtimes_per_startdate <- 60 -rms <- RMS(Mean1Dim(smooth_ano_exp, dim_to_mean), - Mean1Dim(smooth_ano_obs, dim_to_mean), - compROW = required_complete_row, - limits = c(ceiling((runmean_months + 1) / 2), - leadtimes_per_startdate - floor(runmean_months / 2))) -smooth_ano_exp_m_sub <- smooth_ano_exp - InsertDim(Mean1Dim(smooth_ano_exp, 2, - narm = TRUE), 2, dim(smooth_ano_exp)[2]) -spread <- Spread(smooth_ano_exp_m_sub, c(2, 3)) - \donttest{ -Plot2VarsVsLTime(InsertDim(rms[, , , ], 1, 1), spread$sd, - toptitle = 'RMSE and spread', monini = 11, freq = 12, - listexp = c('CMIP5 IC3'), listvar = c('RMSE', 'spread'), - fileout = 'plot2vars.eps') - } - -} -\author{ -History:\cr -1.0 - 2013-03 (I. 
Andreu-Burillo, \email{isabel.andreu-burillo@ic3.cat}) - - Original code -} -\keyword{dynamic} - diff --git a/man/PlotACC.Rd b/man/PlotACC.Rd deleted file mode 100644 index fc66200a0388b94d810c868be5335b9dcf1c18fe..0000000000000000000000000000000000000000 --- a/man/PlotACC.Rd +++ /dev/null @@ -1,125 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/PlotACC.R -\name{PlotACC} -\alias{PlotACC} -\title{Plot Plumes/Timeseries Of Anomaly Correlation Coefficients} -\usage{ -PlotACC(ACC, sdates, toptitle = "", sizetit = 1, ytitle = "", - limits = NULL, legends = NULL, freq = 12, biglab = FALSE, - fill = FALSE, linezero = FALSE, points = TRUE, vlines = NULL, - fileout = "output_PlotACC.eps", width = 8, height = 5, - size_units = "in", res = 100, ...) -} -\arguments{ -\item{ACC}{ACC matrix with with dimensions:\cr -c(nexp, nobs, nsdates, nltime, 4)\cr -with the fourth dimension of length 4 containing the lower limit of the -95\% confidence interval, the ACC, the upper limit of the 95\% confidence -interval and the 95\% significance level.} - -\item{sdates}{List of startdates: c('YYYYMMDD','YYYYMMDD').} - -\item{toptitle}{Main title, optional.} - -\item{sizetit}{Multiplicative factor to scale title size, optional.} - -\item{ytitle}{Title of Y-axis for each experiment: c('',''), optional.} - -\item{limits}{c(lower limit, upper limit): limits of the Y-axis, optional.} - -\item{legends}{List of flags (characters) to be written in the legend, -optional.} - -\item{freq}{1 = yearly, 12 = monthly, 4 = seasonal, ... Default: 12.} - -\item{biglab}{TRUE/FALSE for presentation/paper plot, Default = FALSE.} - -\item{fill}{TRUE/FALSE if filled confidence interval. Default = FALSE.} - -\item{linezero}{TRUE/FALSE if a line at y=0 should be added. Default = FALSE.} - -\item{points}{TRUE/FALSE if points instead of lines. 
Default = TRUE.\cr -Must be TRUE if only 1 leadtime.} - -\item{vlines}{List of x location where to add vertical black lines, optional.} - -\item{fileout}{Name of output file. Extensions allowed: eps/ps, jpeg, png, -pdf, bmp and tiff. \cr -Default = 'output_PlotACC.eps'} - -\item{width}{File width, in the units specified in the parameter size_units -(inches by default). Takes 8 by default.} - -\item{height}{File height, in the units specified in the parameter -size_units (inches by default). Takes 5 by default.} - -\item{size_units}{Units of the size of the device (file or window) to plot -in. Inches ('in') by default. See ?Devices and the creator function of the -corresponding device.} - -\item{res}{Resolution of the device (file or window) to plot in. See -?Devices and the creator function of the corresponding device.} - -\item{\dots}{Arguments to be passed to the method. Only accepts the following -graphical parameters:\cr -adj ann ask bg bty cex.sub cin col.axis col.lab col.main col.sub cra crt -csi cxy err family fg fig fin font font.axis font.lab font.main font.sub -lend lheight ljoin lmitre mar mex mfcol mfrow mfg mkh oma omd omi page -plt smo srt tck tcl usr xaxp xaxs xaxt xlog xpd yaxp yaxs yaxt ylbias ylog\cr -For more information about the parameters see `par`.} -} -\description{ -Plots plumes/timeseries of ACC from an array with dimensions -(output from \code{ACC()}): \cr -c(nexp, nobs, nsdates, nltime, 4)\cr -where the fourth dimension is of length 4 and contains the lower limit of -the 95\% confidence interval, the ACC, the upper limit of the 95\% -confidence interval and the 95\% significance level given by a one-sided -T-test. 
-} -\examples{ -# See examples on Load() to understand the first lines in this example - \dontrun{ -data_path <- system.file('sample_data', package = 's2dverification') -expA <- list(name = 'experiment', path = file.path(data_path, - 'model/$EXP_NAME$/$STORE_FREQ$_mean/$VAR_NAME$_3hourly', - '$VAR_NAME$_$START_DATE$.nc')) -obsX <- list(name = 'observation', path = file.path(data_path, - '$OBS_NAME$/$STORE_FREQ$_mean/$VAR_NAME$', - '$VAR_NAME$_$YEAR$$MONTH$.nc')) - -# Now we are ready to use Load(). -startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') -sampleData <- Load('tos', list(expA), list(obsX), startDates, - leadtimemin = 1, leadtimemax = 4, output = 'lonlat', - latmin = 27, latmax = 48, lonmin = -12, lonmax = 40) - } - \dontshow{ -startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') -sampleData <- s2dverification:::.LoadSampleData('tos', c('experiment'), - c('observation'), startDates, - leadtimemin = 1, - leadtimemax = 4, - output = 'lonlat', - latmin = 27, latmax = 48, - lonmin = -12, lonmax = 40) - } -sampleData$mod <- Season(sampleData$mod, 4, 11, 12, 2) -sampleData$obs <- Season(sampleData$obs, 4, 11, 12, 2) -clim <- Clim(sampleData$mod, sampleData$obs) -ano_exp <- Ano(sampleData$mod, clim$clim_exp) -ano_obs <- Ano(sampleData$obs, clim$clim_obs) -acc <- ACC(Mean1Dim(sampleData$mod, 2), - Mean1Dim(sampleData$obs, 2)) - \donttest{ -PlotACC(acc$ACC, startDates, toptitle = "Anomaly Correlation Coefficient") - - } -} -\author{ -History:\cr -0.1 - 2013-08 (V. Guemas, \email{virginie.guemas@ic3.cat}) - Original code\cr -1.0 - 2013-09 (N. 
Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to CRAN -} -\keyword{dynamic} - diff --git a/man/PlotAno.Rd b/man/PlotAno.Rd deleted file mode 100644 index dd05931046ca62108d5b8916ba2c5f6f13e92054..0000000000000000000000000000000000000000 --- a/man/PlotAno.Rd +++ /dev/null @@ -1,112 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/PlotAno.R -\name{PlotAno} -\alias{PlotAno} -\title{Plot Raw Or Smoothed Anomalies} -\usage{ -PlotAno(exp_ano, obs_ano = NULL, sdates, toptitle = rep("", 15), - ytitle = rep("", 15), limits = NULL, legends = NULL, freq = 12, - biglab = FALSE, fill = TRUE, memb = TRUE, ensmean = TRUE, - linezero = FALSE, points = FALSE, vlines = NULL, sizetit = 1, - fileout = paste0("output", 1:5, "_plotano.eps"), width = 8, height = 5, - size_units = "in", res = 100, ...) -} -\arguments{ -\item{exp_ano}{Array containing the experimental data:\cr -c(nmod/nexp, nmemb/nparam, nsdates, nltime).} - -\item{obs_ano}{Optional matrix containing the observational data:\cr -c(nobs, nmemb, nsdates, nltime)} - -\item{sdates}{List of starting dates: c('YYYYMMDD','YYYYMMDD').} - -\item{toptitle}{Main title for each experiment: c('',''), optional.} - -\item{ytitle}{Title of Y-axis for each experiment: c('',''), optional.} - -\item{limits}{c(lower limit, upper limit): limits of the Y-axis, optional.} - -\item{legends}{List of observational dataset names, optional.} - -\item{freq}{1 = yearly, 12 = monthly, 4 = seasonal, ... Default: 12.} - -\item{biglab}{TRUE/FALSE for presentation/paper plot. Default = FALSE.} - -\item{fill}{TRUE/FALSE if the spread between members should be filled. -Default = TRUE.} - -\item{memb}{TRUE/FALSE if all members/only the ensemble-mean should be -plotted.\cr -Default = TRUE.} - -\item{ensmean}{TRUE/FALSE if the ensemble-mean should be plotted. -Default = TRUE.} - -\item{linezero}{TRUE/FALSE if a line at y=0 should be added. 
-Default = FALSE.} - -\item{points}{TRUE/FALSE if points instead of lines should be shown. -Default = FALSE.} - -\item{vlines}{List of x location where to add vertical black lines, optional.} - -\item{sizetit}{Multiplicative factor to scale title size, optional.} - -\item{fileout}{Name of the output file for each experiment: c('',''). -Extensions allowed: eps/ps, jpeg, png, pdf, bmp and tiff. If filenames -with different extensions are passed, it will be considered only the first -one and it will be extended to the rest. \cr -Default = c('output1_plotano.eps', 'output2_plotano.eps', - 'output3_plotano.eps', 'output4_plotano.eps', - 'output5_plotano.eps')} - -\item{width}{File width, in the units specified in the parameter size_units -(inches by default). Takes 8 by default.} - -\item{height}{File height, in the units specified in the parameter -size_units (inches by default). Takes 5 by default.} - -\item{size_units}{Units of the size of the device (file or window) to plot -in. Inches ('in') by default. See ?Devices and the creator function of the -corresponding device.} - -\item{res}{Resolution of the device (file or window) to plot in. See -?Devices and the creator function of the corresponding device.} - -\item{\dots}{Arguments to be passed to the method. Only accepts the following -graphical parameters:\cr -adj ann ask bg bty cex.sub cin col.axis col.lab col.main col.sub cra crt -csi cxy err family fg fig font font.axis font.lab font.main font.sub lend -lheight ljoin lmitre mar mex mfcol mfrow mfg mkh oma omd omi page plt smo -srt tck tcl usr xaxp xaxs xaxt xlog xpd yaxp yaxs yaxt ylbias ylog \cr -For more information about the parameters see `par`.} -} -\description{ -Plots timeseries of raw or smoothed anomalies of any variable output from -\code{Load()} or \code{Ano()} or or \code{Ano_CrossValid()} or -\code{Smoothing()}. 
-} -\examples{ -# Load sample data as in Load() example: -example(Load) -clim <- Clim(sampleData$mod, sampleData$obs) -ano_exp <- Ano(sampleData$mod, clim$clim_exp) -ano_obs <- Ano(sampleData$obs, clim$clim_obs) -runmean_nb_months <- 12 -dim_to_smooth <- 4 # Smooth along lead-times -smooth_ano_exp <- Smoothing(ano_exp, runmean_nb_months, dim_to_smooth) -smooth_ano_obs <- Smoothing(ano_obs, runmean_nb_months, dim_to_smooth) - \donttest{ -PlotAno(smooth_ano_exp, smooth_ano_obs, startDates, - toptitle = paste('smoothed anomalies'), ytitle = c('K', 'K', 'K'), - legends = 'ERSST', biglab = FALSE, fileout = 'tos_ano.eps') - } - -} -\author{ -History:\cr -0.1 - 2011-03 (V. Guemas, \email{virginie.guemas@ic3.cat}) - Original code\cr -1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to CRAN -} -\keyword{dynamic} - diff --git a/man/PlotBoxWhisker.Rd b/man/PlotBoxWhisker.Rd deleted file mode 100644 index a536686be6087cfb54b8c967ef4bfbbe80a37e3b..0000000000000000000000000000000000000000 --- a/man/PlotBoxWhisker.Rd +++ /dev/null @@ -1,134 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/PlotBoxWhisker.R -\name{PlotBoxWhisker} -\alias{PlotBoxWhisker} -\title{Box-And-Whisker Plot of Time Series with Ensemble Distribution} -\usage{ -PlotBoxWhisker(exp, obs, toptitle = "", ytitle = "", monini = 1, - yearini = 0, freq = 1, expname = "exp 1", obsname = "obs 1", - drawleg = TRUE, fileout = "output_PlotBoxWhisker.ps", width = 8, - height = 5, size_units = "in", res = 100, ...) -} -\arguments{ -\item{exp}{Forecast array of multi-member time series, e.g., the NAO index -of one experiment. The expected dimensions are -c(members, start dates/forecast horizons). A vector with only the time -dimension can also be provided. Only monthly or lower frequency time -series are supported. 
See parameter freq.} - -\item{obs}{Observational vector or array of time series, e.g., the NAO index -of the observations that correspond the forecast data in \code{exp}. -The expected dimensions are c(start dates/forecast horizons) or -c(1, start dates/forecast horizons). Only monthly or lower frequency time -series are supported. See parameter freq.} - -\item{toptitle}{Character string to be drawn as figure title.} - -\item{ytitle}{Character string to be drawn as y-axis title.} - -\item{monini}{Number of the month of the first time step, from 1 to 12.} - -\item{yearini}{Year of the first time step.} - -\item{freq}{Frequency of the provided time series: 1 = yearly, 12 = monthly,} - -\item{expname}{Experimental dataset name.} - -\item{obsname}{Name of the observational reference dataset.} - -\item{drawleg}{TRUE/FALSE: whether to draw the legend or not.} - -\item{fileout}{Name of output file. Extensions allowed: eps/ps, jpeg, png, -pdf, bmp and tiff. \cr -Default = 'output_PlotBox.ps'.} - -\item{width}{File width, in the units specified in the parameter size_units -(inches by default). Takes 8 by default.} - -\item{height}{File height, in the units specified in the parameter -size_units (inches by default). Takes 5 by default.} - -\item{size_units}{Units of the size of the device (file or window) to plot -in. Inches ('in') by default. See ?Devices and the creator function of the -corresponding device.} - -\item{res}{Resolution of the device (file or window) to plot in. See -?Devices and the creator function of the corresponding device.} - -\item{...}{Arguments to be passed to the method. 
Only accepts the following -graphical parameters:\cr -ann ask bg cex.lab cex.sub cin col.axis col.lab col.main col.sub cra crt -csi cxy err family fg fig font font.axis font.lab font.main font.sub lend -lheight ljoin lmitre mex mfcol mfrow mfg mkh oma omd omi page pin plt pty -smo srt tck tcl usr xaxp xaxs xaxt xlog xpd yaxp yaxs yaxt ylbias ylog \cr -For more information about the parameters see `par`.} -} -\value{ -Generates a file at the path specified via \code{fileout}. -} -\description{ -Produce time series of box-and-whisker plot showing the distribution of the -members of a forecast vs. the observed evolution. The correlation between -forecast and observational data is calculated and displayed. Only works for -n-monthly to n-yearly time series. -} -\examples{ -# See examples on Load() to understand the first lines in this example - \dontrun{ -data_path <- system.file('sample_data', package = 's2dverification') -expA <- list(name = 'experiment', path = file.path(data_path, - 'model/$EXP_NAME$/$STORE_FREQ$_mean/$VAR_NAME$_3hourly', - '$VAR_NAME$_$START_DATE$.nc')) -obsX <- list(name = 'observation', path = file.path(data_path, - '$OBS_NAME$/$STORE_FREQ$_mean/$VAR_NAME$', - '$VAR_NAME$_$YEAR$$MONTH$.nc')) - -# Now we are ready to use Load(). -startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') -sampleData <- Load('tos', list(expA), list(obsX), startDates, - leadtimemin = 1, leadtimemax = 4, output = 'lonlat', - latmin = 27, latmax = 48, lonmin = -12, lonmax = 40) - } - \dontshow{ -startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') -sampleData <- s2dverification:::.LoadSampleData('tos', c('experiment'), - c('observation'), startDates, - leadtimemin = 1, - leadtimemax = 4, - output = 'lonlat', - latmin = 20, latmax = 80, - lonmin = -80, lonmax = 40) -# No example data is available over NAO region, so in this example we will -# tweak the available data. 
In a real use case, one can Load() the data over -# NAO region directly. -sampleData$lon[] <- c(40, 280, 340) -attr(sampleData$lon, 'first_lon') <- 280 -attr(sampleData$lon, 'last_lon') <- 40 -attr(sampleData$lon, 'data_across_gw') <- TRUE -sampleData$lat[] <- c(20, 80) -attr(sampleData$lat, 'first_lat') <- 20 -attr(sampleData$lat, 'last_lat') <- 80 - } -# Now ready to compute the EOFs and project on, for example, the first -# variability mode. -ano <- Ano_CrossValid(sampleData$mod, sampleData$obs) -nao <- NAO(ano$ano_exp, ano$ano_obs, sampleData$lon, sampleData$lat) -# Finally plot the nao index - \donttest{ -PlotBoxWhisker(nao$NAO_exp, nao$NAO_obs, "NAO index, DJF", "NAO index (PC1) TOS", - monini = 12, yearini = 1985, freq = 1, "Exp. A", "Obs. X") - } - -} -\author{ -History:\cr -0.1 - 2013-09 (F. Lienert, \email{flienert@ic3.cat}) - Original code\cr -0.2 - 2015-03 (L. Batte, \email{lauriane.batte@ic3.cat}) - Removed all\cr - normalization for sake of clarity. -1.0 - 2016-03 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Formatting to R CRAN -} -\seealso{ -EOF, ProjectField, NAO -} -\keyword{datagen} - diff --git a/man/PlotClim.Rd b/man/PlotClim.Rd index 7ee001ee198eb506524cde9c967557dc3ec9ebc1..35ab17d1028933b98f4178e6773b26d61bee751f 100644 --- a/man/PlotClim.Rd +++ b/man/PlotClim.Rd @@ -81,10 +81,4 @@ PlotClim(clim$clim_exp, clim$clim_obs, toptitle = paste('climatologies'), } } -\author{ -History:\cr -0.1 - 2011-03 (V. Guemas, \email{virginie.guemas@ic3.cat}) - Original code\cr -1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to CRAN -} -\keyword{datagen} diff --git a/man/PlotEquiMap.Rd b/man/PlotEquiMap.Rd index cb33fc60f2562236a2ef4025c056f28977a15bcc..cf45ead4b3555ba1417ebcc18d7b01140550b11c 100644 --- a/man/PlotEquiMap.Rd +++ b/man/PlotEquiMap.Rd @@ -250,7 +250,7 @@ bar is disabled. 
\examples{ # See examples on Load() to understand the first lines in this example \dontrun{ -data_path <- system.file('sample_data', package = 's2dverification') +data_path <- system.file('sample_data', package = 's2dv') expA <- list(name = 'experiment', path = file.path(data_path, 'model/$EXP_NAME$/$STORE_FREQ$_mean/$VAR_NAME$_3hourly', '$VAR_NAME$_$START_DATE$.nc')) @@ -266,7 +266,7 @@ sampleData <- Load('tos', list(expA), list(obsX), startDates, } \dontshow{ startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') -sampleData <- s2dverification:::.LoadSampleData('tos', c('experiment'), +sampleData <- s2dv:::.LoadSampleData('tos', c('experiment'), c('observation'), startDates, leadtimemin = 1, leadtimemax = 4, @@ -278,14 +278,4 @@ PlotEquiMap(sampleData$mod[1, 1, 1, 1, , ], sampleData$lon, sampleData$lat, toptitle = 'Predicted sea surface temperature for Nov 1960 from 1st Nov', sizetit = 0.5) } -\author{ -History:\cr - 0.1 - 2011-11 (V. Guemas, \email{virginie.guemas@ic3.cat}) - Original code\cr - 0.2 - 2013-04 (R. Saurral \email{ramiro.saurral@ic3.cat}) - LabW\cr - 1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to R CRAN\cr - 1.1 - 2013-09 (C. Prodhomme, \email{chloe.prodhomme@ic3.cat}) - add winds\cr - 1.2 - 2016-08 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Refactored and added features, - and adapted to new ColorBar. -} -\keyword{dynamic} diff --git a/man/PlotLayout.Rd b/man/PlotLayout.Rd index e4cf4ecfd7edd11be269ccd363618247b03ddf2e..f01fdf9ba3a823bc5c151f8eed280136f62be9fb 100644 --- a/man/PlotLayout.Rd +++ b/man/PlotLayout.Rd @@ -196,11 +196,11 @@ each of the plot functions see parameter 'special_args'.} This function takes an array or list of arrays and loops over each of them to plot all the sub-arrays they contain on an automatically generated multi-pannel layout. A different plot function (not necessarily from -s2dverification) can be applied over each of the provided arrays. 
The input +s2dv) can be applied over each of the provided arrays. The input dimensions of each of the functions have to be specified, either with the names or the indices of the corresponding input dimensions. It is possible to draw a common colour bar at any of the sides of the multi-pannel for all -the s2dverification plots that use a colour bar. Common plotting arguments +the s2dv plots that use a colour bar. Common plotting arguments for all the arrays in 'var' can be specified via the '...' parameter, and specific plotting arguments for each array can be fully adjusted via 'special_args'. It is possible to draw titles for each of the figures, @@ -214,7 +214,7 @@ nested in complex layouts. \examples{ # See examples on Load() to understand the first lines in this example \dontrun{ -data_path <- system.file('sample_data', package = 's2dverification') +data_path <- system.file('sample_data', package = 's2dv') expA <- list(name = 'experiment', path = file.path(data_path, 'model/$EXP_NAME$/$STORE_FREQ$_mean/$VAR_NAME$_3hourly', '$VAR_NAME$_$START_DATE$.nc')) @@ -230,7 +230,7 @@ sampleData <- Load('tos', list(expA), list(obsX), startDates, } \dontshow{ startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') -sampleData <- s2dverification:::.LoadSampleData('tos', c('experiment'), +sampleData <- s2dv:::.LoadSampleData('tos', c('experiment'), c('observation'), startDates, leadtimemin = 1, leadtimemax = 4, @@ -244,9 +244,4 @@ PlotLayout(PlotEquiMap, c('lat', 'lon'), sampleData$mod[1, , 1, 1, , ], titles = paste('Member', 1:15)) } -\author{ -History:\cr - 0.1 - 2016-08 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Original code -} -\keyword{dynamic} diff --git a/man/PlotMatrix.Rd b/man/PlotMatrix.Rd index 70c1211e9dd333d8dc947671add5f6ef55c4f9cc..24f046d8d5d94170b3bda4ab0e51f6a72b9399be 100644 --- a/man/PlotMatrix.Rd +++ b/man/PlotMatrix.Rd @@ -72,7 +72,7 @@ creator function of the corresponding device.} to plot in. 
See ?Devices and the creator function of the corresponding device.} \item{...}{The additional parameters to be passed to function ColorBar() in -s2dverification for color legend creation.} +s2dv for color legend creation.} } \value{ A figure in popup window by default, or saved to the specified path. diff --git a/man/PlotSection.Rd b/man/PlotSection.Rd index f74473190242d1630ab85d8b0e7b0fdbefc387e5..413ef63223d806556fb1cb2a8ba42b64d9176bb2 100644 --- a/man/PlotSection.Rd +++ b/man/PlotSection.Rd @@ -65,14 +65,8 @@ For more information about the parameters see `par`.} Plot a (longitude,depth) or (latitude,depth) section. } \examples{ -sampleData <- s2dverification::sampleDepthData +sampleData <- s2dv::sampleDepthData PlotSection(sampleData$mod[1, 1, 1, 1, , ], sampleData$lat, sampleData$depth, toptitle = 'temperature 1995-11 member 0') } -\author{ -History:\cr -0.1 - 2012-09 (V. Guemas, \email{virginie.guemas@ic3.cat}) - Original code\cr -1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to CRAN -} -\keyword{dynamic} diff --git a/man/PlotStereoMap.Rd b/man/PlotStereoMap.Rd index 3bf2f69c44f509a9afbbcb34f6fa9f9dfcc0d198..4b910a90ce21857977ec676bc70d188232e78096 100644 --- a/man/PlotStereoMap.Rd +++ b/man/PlotStereoMap.Rd @@ -183,13 +183,4 @@ y <- seq(from = -90, to = 90, length.out = 50) PlotStereoMap(data, x, y, latlims = c(60, 90), brks = 50, toptitle = "This is the title") } -\author{ -History:\cr -1.0 - 2014-07 (V. Guemas, \email{virginie.guemas@ic3.cat}) - Original code\cr -1.1 - 2015-12 (C. Ardilouze, \email{constantin.ardilouze@meteo.fr}) - Box(es) drawing\cr -1.2 - 2016-08 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Refacotred the function and - merged in Jean-Philippe circle - border and Constantin boxes. 
-} -\keyword{dynamic} diff --git a/man/PlotVsLTime.Rd b/man/PlotVsLTime.Rd deleted file mode 100644 index 2c71e9fac1915afa9d9fcba50006d360da2a44c7..0000000000000000000000000000000000000000 --- a/man/PlotVsLTime.Rd +++ /dev/null @@ -1,136 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/PlotVsLTime.R -\name{PlotVsLTime} -\alias{PlotVsLTime} -\title{Plots A Score Along The Forecast Time With Its Confidence Interval} -\usage{ -PlotVsLTime(var, toptitle = "", ytitle = "", monini = 1, freq = 12, - nticks = NULL, limits = NULL, listexp = c("exp1", "exp2", "exp3"), - listobs = c("obs1", "obs2", "obs3"), biglab = FALSE, hlines = NULL, - leg = TRUE, siglev = FALSE, sizetit = 1, show_conf = TRUE, - fileout = "output_plotvsltime.eps", width = 8, height = 5, - size_units = "in", res = 100, ...) -} -\arguments{ -\item{var}{Matrix containing any Prediction Score with dimensions:\cr -(nexp/nmod, 3/4 ,nltime)\cr -or (nexp/nmod, nobs, 3/4 ,nltime).} - -\item{toptitle}{Main title, optional.} - -\item{ytitle}{Title of Y-axis, optional.} - -\item{monini}{Starting month between 1 and 12. Default = 1.} - -\item{freq}{1 = yearly, 12 = monthly, 4 = seasonal, ... Default = 12.} - -\item{nticks}{Number of ticks and labels on the x-axis, optional.} - -\item{limits}{c(lower limit, upper limit): limits of the Y-axis, optional.} - -\item{listexp}{List of experiment names, optional.} - -\item{listobs}{List of observation names, optional.} - -\item{biglab}{TRUE/FALSE for presentation/paper plot. Default = FALSE.} - -\item{hlines}{c(a,b, ..) Add horizontal black lines at Y-positions a,b, ...\cr -Default = NULL.} - -\item{leg}{TRUE/FALSE if legend should be added or not to the plot. 
-Default = TRUE.} - -\item{siglev}{TRUE/FALSE if significance level should replace confidence -interval.\cr -Default = FALSE.} - -\item{sizetit}{Multiplicative factor to change title size, optional.} - -\item{show_conf}{TRUE/FALSE to show/not confidence intervals for input -variables.} - -\item{fileout}{Name of output file. Extensions allowed: eps/ps, jpeg, png, -pdf, bmp and tiff.\cr -Default = 'output_plotvsltime.eps'} - -\item{width}{File width, in the units specified in the parameter size_units -(inches by default). Takes 8 by default.} - -\item{height}{File height, in the units specified in the parameter -size_units (inches by default). Takes 5 by default.} - -\item{size_units}{Units of the size of the device (file or window) to plot -in. Inches ('in') by default. See ?Devices and the creator function of the -corresponding device.} - -\item{res}{Resolution of the device (file or window) to plot in. See -?Devices and the creator function of the corresponding device.} - -\item{...}{Arguments to be passed to the method. 
Only accepts the following -graphical parameters:\cr -adj ann ask bg bty cex.sub cin col.axis col.lab col.main col.sub cra crt -csi cxy err family fg fig font font.axis font.lab font.main font.sub -lheight ljoin lmitre mar mex mfcol mfrow mfg mkh oma omd omi page pch plt -smo srt tck tcl usr xaxp xaxs xaxt xlog xpd yaxp yaxs yaxt ylbias ylog \cr -For more information about the parameters see `par`.} -} -\description{ -Plots The Correlation (\code{Corr()}) or the Root Mean Square Error -(\code{RMS()}) between the forecasted values and their observational -counterpart or the slopes of their trends (\code{Trend()}) or the -InterQuartile Range, Maximum-Mininum, Standard Deviation or Median Absolute -Deviation of the Ensemble Members (\code{Spread()}), or the ratio between -the Ensemble Spread and the RMSE of the Ensemble Mean (\code{RatioSDRMS()}) -along the forecast time for all the input experiments on the same figure -with their confidence intervals. -} -\details{ -Examples of input:\cr -Model and observed output from \code{Load()} then \code{Clim()} then -\code{Ano()} then \code{Smoothing()}:\cr -(nmod, nmemb, nsdate, nltime) and (nobs, nmemb, nsdate, nltime)\cr -then averaged over the members\cr -\code{Mean1Dim(var_exp/var_obs, posdim = 2)}:\cr -(nmod, nsdate, nltime) and (nobs, nsdate, nltime)\cr -then passed through\cr - \code{Corr(exp, obs, posloop = 1, poscor = 2)} or\cr - \code{RMS(exp, obs, posloop = 1, posRMS = 2)}:\cr - (nmod, nobs, 3, nltime)\cr -would plot the correlations or RMS between each exp & each obs as a function -of the forecast time. 
-} -\examples{ -# Load sample data as in Load() example: -example(Load) -clim <- Clim(sampleData$mod, sampleData$obs) -ano_exp <- Ano(sampleData$mod, clim$clim_exp) -ano_obs <- Ano(sampleData$obs, clim$clim_obs) -runmean_months <- 12 -dim_to_smooth <- 4 # Smooth along lead-times -smooth_ano_exp <- Smoothing(ano_exp, runmean_months, dim_to_smooth) -smooth_ano_obs <- Smoothing(ano_obs, runmean_months, dim_to_smooth) -dim_to_mean <- 2 # Mean along members -required_complete_row <- 3 # Discard startdates for which there are NA leadtimes -leadtimes_per_startdate <- 60 -corr <- Corr(Mean1Dim(smooth_ano_exp, dim_to_mean), - Mean1Dim(smooth_ano_obs, dim_to_mean), - compROW = required_complete_row, - limits = c(ceiling((runmean_months + 1) / 2), - leadtimes_per_startdate - floor(runmean_months / 2))) - \donttest{ -PlotVsLTime(corr, toptitle = "correlations", ytitle = "correlation", - monini = 11, limits = c(-1, 2), listexp = c('CMIP5 IC3'), - listobs = c('ERSST'), biglab = FALSE, hlines = c(-1, 0, 1), - fileout = 'tos_cor.eps') - } - -} -\author{ -History:\cr -0.1 - 2011-03 (V. Guemas, \email{virginie.guemas@ic3.cat}) - Original code\cr -0.2 - 2013-03 (I. Andreu-Burillo, \email{isabel.andreu-burillo@ic3.cat}) - Introduced parameter sizetit\cr -0.3 - 2013-10 (I. Andreu-Burillo, \email{isabel.andreu-burillo@ic3.cat}) - Introduced parameter show_conf\cr -1.0 - 2013-11 (N. 
Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to CRAN -} -\keyword{dynamic} - diff --git a/man/RMS.Rd b/man/RMS.Rd index ebbe544fb9805e7e5fb5270ce60a329b0779666d..ac546867c422cdfac3e703279e437934267c1e9b 100644 --- a/man/RMS.Rd +++ b/man/RMS.Rd @@ -64,30 +64,15 @@ The confidence interval is computed by the chi2 distribution.\cr } \examples{ # Load sample data as in Load() example: -example(Load) -clim <- Clim(sampleData$mod, sampleData$obs) -ano_exp <- Ano(sampleData$mod, clim$clim_exp) -ano_obs <- Ano(sampleData$obs, clim$clim_obs) -runmean_months <- 12 -dim_to_smooth <- 4 # Smooth along lead-times -smooth_ano_exp <- Smoothing(ano_exp, runmean_months, dim_to_smooth) -smooth_ano_obs <- Smoothing(ano_obs, runmean_months, dim_to_smooth) -dim_to_mean <- 2 # Mean along members -# Discard start-dates for which some leadtimes are missing -leadtimes_per_startdate <- 60 -rms <- RMS(smooth_ano_exp, - smooth_ano_obs, - comp_dim = 'ftime', - limits = c(ceiling((runmean_months + 1) / 2), - leadtimes_per_startdate - floor(runmean_months / 2))) + set.seed(1) + exp1 <- array(rnorm(120), dim = c(member = 3, sdate = 5, ftime = 2, lon = 1, lat = 4)) + set.seed(2) + obs1 <- array(rnorm(80), dim = c(member = 2, sdate = 5, ftime = 2, lon = 1, lat = 4)) + set.seed(2) + na <- floor(runif(10, min = 1, max = 80)) + obs1[na] <- NA + res <- RMS(exp1, obs1, comp_dim = 'ftime') + # Renew example when Ano and Smoothing are ready } -\author{ -History:\cr -0.1 - 2011-05 (V. Guemas, \email{vguemas@ic3.cat}) - Original code\cr -1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens2@ic3.cat}) - Formatting to R CRAN\cr -1.1 - 2017-02 (A. Hunter, \email{alasdair.hunter@bsc.es}) - Adapted to veriApply() -3.0 - 2020-01 (A. 
Ho, \email{an.ho@bsc.es}) - Adapt multiApply feature -} -\keyword{datagen} diff --git a/man/RMSSS.Rd b/man/RMSSS.Rd index 240a74633342e0e8a449517443cccba652040efe..3b9c1be48f6aee17c4ce51c11a01f8dca28f3559 100644 --- a/man/RMSSS.Rd +++ b/man/RMSSS.Rd @@ -62,12 +62,4 @@ obs <- array(rnorm(6), dim = c(time = 3, member = 2, dat = 1)) res <- RMSSS(exp, obs, time_dim = 'time') } -\author{ -History:\cr -0.1 - 2012-04 (V. Guemas, \email{vguemas@bsc.es}) - Original code\cr -1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Formatting to R CRAN\cr -1.1 - 2017-02 (A. Hunter, \email{alasdair.hunter@bsc.es}) - Adapted to veriApply() -3.0 - 2019-12 (A. Ho, \email{an.ho@bsc.es}) - Adopt multiApply feature -} -\keyword{datagen} diff --git a/man/Regression.Rd b/man/Regression.Rd index f6a28a254f48efb2fc7da9080c81bcec4bece186..cbb28759448945db58750f6d3044745e14cb8cde 100644 --- a/man/Regression.Rd +++ b/man/Regression.Rd @@ -38,6 +38,7 @@ regression. The default value is na.omit-} computation. Default value is NULL.} } \value{ +A list containing: \item{$regression}{ A numeric array with same dimensions as parameter 'datay' and 'datax' except the 'time_dim' dimension, which is replaced by a 'stats' dimension containing @@ -83,19 +84,12 @@ on the student-T distribution. \examples{ # Load sample data as in Load() example: example(Load) -datay <- sampleData$mod -datax <- sampleData$obs -datay <- Subset(datay, 'member', 2) +datay <- sampleData$mod[, 1, , ] +names(dim(datay)) <- c('sdate', 'ftime') +datax <- sampleData$obs[, 1, , ] +names(dim(datax)) <- c('sdate', 'ftime') res1 <- Regression(datay, datax, formula = y~poly(x, 2, raw = TRUE)) res2 <- Regression(datay, datax, conf.lev = 0.9) } -\author{ -History:\cr -0.1 - 2013-05 (V. Guemas, \email{virginie.guemas@ic3.cat}) - Original code\cr -1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to CRAN -2.0 - 2019-10 (N. 
Perez-Zanon, \email{nuria.perez@bsc.es}) - Formatting to multiApply -3.0 - 2020-01 (A. Ho, \email{an.ho@bsc.es}) - Adapt multiApply feature -} -\keyword{datagen} diff --git a/man/Reorder.Rd b/man/Reorder.Rd index 6b44810364f53b92605d25f43de04134c71730e3..0afa07ea27990f673828ea1d32547c2d993ffde1 100644 --- a/man/Reorder.Rd +++ b/man/Reorder.Rd @@ -9,9 +9,8 @@ Reorder(data, order) \arguments{ \item{data}{An array of which the dimension to be reordered.} -\item{posdim}{An integer indicating the position of the new dimension.} - -\item{lendim}{An integer indicating the length of the new dimension.} +\item{order}{A vector of indices or character strings indicating the new +order of the dimension.} } \value{ An array which has the same values as parameter 'data' but with @@ -27,8 +26,4 @@ Reorder the dimension order of a multi-dimensional array dat2 <- array(c(1:10), dim = c(2, 1, 5)) print(dim(Reorder(dat2, c(2, 1, 3)))) } -\author{ -History:\cr -} -\keyword{datagen} diff --git a/man/ToyModel.Rd b/man/ToyModel.Rd index ca47b4498808f86c340cfb70b6bdb0670560a0fa..64feac8d0a10d99e1154991197a8337b9a2b4f6b 100644 --- a/man/ToyModel.Rd +++ b/man/ToyModel.Rd @@ -82,7 +82,7 @@ toyforecast <- ToyModel(alpha = a, beta = b, gamma = g, sig = sig, trend = t, # Example 2: Generate forecast from loaded observations # Decadal prediction example \dontrun{ -data_path <- system.file('sample_data', package = 's2dverification') +data_path <- system.file('sample_data', package = 's2dv') expA <- list(name = 'experiment', path = file.path(data_path, 'model/$EXP_NAME$/$STORE_FREQ$_mean/$VAR_NAME$_3hourly', '$VAR_NAME$_$START_DATE$.nc')) @@ -98,7 +98,7 @@ sampleData <- Load('tos', list(expA), list(obsX), startDates, } \dontshow{ startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') -sampleData <- s2dverification:::.LoadSampleData('tos', c('experiment'), +sampleData <- s2dv:::.LoadSampleData('tos', c('experiment'), c('observation'), startDates, output = 'areave', latmin = 27, 
latmax = 48, @@ -119,10 +119,4 @@ PlotAno(toyforecast$mod, toyforecast$obs, startDates, } } -\author{ -History:\cr -1.0 - 2014-08 (O.Bellprat) - Original code -1.1 - 2016-02 (O.Bellprat) - Include security check for parameters -} -\keyword{datagen} diff --git a/man/Trend.Rd b/man/Trend.Rd index f058009aa6b7fe56ce5591e7edc211a4d12e2477..e9c890e4d6e7baa2a17a529b053baa952a39547b 100644 --- a/man/Trend.Rd +++ b/man/Trend.Rd @@ -30,6 +30,7 @@ regression computation. The default value is 0.95.} computation. The default value is NULL.} } \value{ +A list containing: \item{$trend}{ A numeric array with the first dimension 'stats', followed by the same dimensions as parameter 'data' except the 'time_dim' dimension. The length @@ -71,12 +72,4 @@ months_between_startdates <- 60 trend <- Trend(sampleData$obs, polydeg = 2) } -\author{ -History:\cr -0.1 - 2011-05 (V. Guemas, \email{virginie.guemas@ic3.cat}) - Original code\cr -1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to CRAN\cr -2.0 - 2017-02 (A. Hunter, \email{alasdair.hunter@bsc.es}) - Adapt to veriApply() -3.0 - 2019-12 (A. Ho, \email{an.ho@bsc.es}) - Adapt multiApply feature -} -\keyword{datagen} diff --git a/man/clim.palette.Rd b/man/clim.palette.Rd index 95f8407c38a0b3791540a5ffff7fe0dccef8bd09..d912f47a346c3e9a0d3255fe7fe9f1774cc1a58c 100644 --- a/man/clim.palette.Rd +++ b/man/clim.palette.Rd @@ -30,9 +30,4 @@ cols <- clim.colors(20) ColorBar(lims, cols) } -\author{ -History:\cr -0.0 - 2016-01 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Original code. 
-} -\keyword{datagen} diff --git a/man/s2dv-package.Rd b/man/s2dv-package.Rd new file mode 100644 index 0000000000000000000000000000000000000000..7cd54795bcc3cabf8a7aa39492516bfd73b7aff6 --- /dev/null +++ b/man/s2dv-package.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/s2dv-package.R +\docType{package} +\name{s2dv-package} +\alias{s2dv} +\alias{s2dv-package} +\title{Set of Common Tools for Seasonal to Decadal Verification} +\description{ +The advanced version of package 's2dverification'. It is +intended for 'seasonal to decadal' (s2d) climate forecast verification, but +it can also be used in other kinds of forecasts or general climate analysis. +This package is specially designed for the comparison between the experimental +and observational datasets. The functionality of the included functions covers +from data retrieval, data post-processing, skill scores against observation, +to visualization. Compared to 's2dverification', 's2dv' adopts the regime of +package 'multiApply'. Therefore, it can use multi-core for computation and work +with multi-dimensional arrays with a higher level of flexibility. 
+} +\references{ +\url{https://earth.bsc.es/gitlab/es/s2dverification/} +} +\keyword{internal} + diff --git a/man/sampleDepthData.Rd b/man/sampleDepthData.Rd new file mode 100644 index 0000000000000000000000000000000000000000..869af86eff4767dc415ecc948852362bdd7ed76e --- /dev/null +++ b/man/sampleDepthData.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/sampleDepthData.R +\docType{data} +\name{sampleDepthData} +\alias{sampleDepthData} +\title{Sample of Experimental Data for Forecast Verification In Function Of +Latitudes And Depths} +\format{The data set provides a variable named 'sampleDepthData'.\cr\cr + +sampleDepthData$exp is an array that contains the experimental data and the +dimension meanings and values are:\cr + c(# of experimental datasets, # of members, # of starting dates, + # of lead-times, # of depths, # of latitudes)\cr + c(1, 5, 3, 60, 7, 21)\cr\cr + +sampleDepthData$obs should be an array that would contain the observational data +but in this sample is not defined (NULL).\cr\cr + +sampleDepthData$depths is an array with the 7 depths covered by the data.\cr\cr + +sampleDepthData$lat is an array with the 21 latitudes covered by the data.\cr\cr} +\usage{ +data(sampleDepthData) +} +\description{ +This data set provides data in function of latitudes and depths for the +variable 'tos', i.e. sea surface temperature, from the decadal climate +prediction experiment run at IC3 in the context of the CMIP5 project.\cr +Its name within IC3 local database is 'i00k'. 
+} + diff --git a/man/sampleMap.Rd b/man/sampleMap.Rd new file mode 100644 index 0000000000000000000000000000000000000000..651d18597c5dbbf40a55f411d4cd2c39c6bc6fcf --- /dev/null +++ b/man/sampleMap.Rd @@ -0,0 +1,46 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/sampleMap.R +\docType{data} +\name{sampleMap} +\alias{sampleMap} +\title{Sample Of Observational And Experimental Data For Forecast Verification In Function Of Longitudes And Latitudes} +\format{The data set provides with a variable named 'sampleMap'.\cr\cr + +sampleMap$mod is an array that contains the experimental data and the dimension meanings and values are:\cr + c(# of experimental datasets, # of members, # of starting dates, # of lead-times, # of latitudes, # of longitudes)\cr + c(1, 3, 5, 60, 2, 3)\cr\cr + +sampleMap$obs is an array that contains the observational data and the dimension meanings and values are:\cr + c(# of observational datasets, # of members, # of starting dates, # of lead-times, # of latitudes, # of longitudes)\cr + c(1, 1, 5, 60, 2, 3)\cr\cr + + sampleMap$lat is an array with the 2 latitudes covered by the data (see examples on Load() for details on why such low resolution).\cr\cr + + sampleMap$lon is an array with the 3 longitudes covered by the data (see examples on Load() for details on why such low resolution).} +\usage{ +data(sampleMap) +} +\description{ +This data set provides data in function of longitudes and latitudes for the variable 'tos', i.e. sea surface temperature, over the mediterranean zone from the sample experimental and observational datasets attached to the package. 
See examples on how to use Load() for details.\cr\cr +The data is provided through a variable named 'sampleMap' and is structured as expected from the 'Load()' function in the 's2dv' package if was called as follows:\cr\cr + \preformatted{ +data_path <- system.file('sample_data', package = 's2dv') +exp <- list( + name = 'experiment', + path = file.path(data_path, 'model/$EXP_NAME$/monthly_mean', + '$VAR_NAME$_3hourly/$VAR_NAME$_$START_DATES$.nc') + ) +obs <- list( + name = 'observation', + path = file.path(data_path, 'observation/$OBS_NAME$/monthly_mean', + '$VAR_NAME$/$VAR_NAME$_$YEAR$$MONTH$.nc') + ) +# Now we are ready to use Load(). +startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') +sampleData <- Load('tos', list(exp), list(obs), startDates, + leadtimemin = 1, leadtimemax = 4, output = 'lonlat', + latmin = 27, latmax = 48, lonmin = -12, lonmax = 40) + } +Check the documentation on 'Load()' in the package 's2dv' for more information. +} + diff --git a/man/sampleTimeSeries.Rd b/man/sampleTimeSeries.Rd new file mode 100644 index 0000000000000000000000000000000000000000..280277eb9ccc2e2ae7bf6e37439d01d716b8e3d7 --- /dev/null +++ b/man/sampleTimeSeries.Rd @@ -0,0 +1,50 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/sampleTimeSeries.R +\docType{data} +\name{sampleTimeSeries} +\alias{sampleTimeSeries} +\title{Sample Of Observational And Experimental Data For Forecast Verification As Area Averages} +\format{The data set provides with a variable named 'sampleTimeSeries'.\cr\cr + +sampleTimeSeries$mod is an array that contains the experimental data and the dimension meanings and values are:\cr + c(# of experimental datasets, # of members, # of starting dates, # of lead-times)\cr + c(1, 3, 5, 60)\cr\cr + +sampleTimeSeries$obs is an array that contains the observational data and the dimension meanings and values are:\cr + c(# of observational datasets, # of members, # of starting dates, # of lead-times)\cr + 
c(1, 1, 5, 60)\cr\cr + +sampleTimeSeries$lat is an array with the 2 latitudes covered by the data that was area averaged to calculate the time series (see examples on Load() for details on why such low resolution).\cr\cr + +sampleTimeSeries$lon is an array with the 3 longitudes covered by the data that was area averaged to calculate the time series (see examples on Load() for details on why such low resolution).} +\usage{ +data(sampleTimeSeries) +} +\description{ +This data set provides area averaged data for the variable 'tos', i.e. sea +surface temperature, over the mediterranean zone from the example datasets +attached to the package. See examples on Load() for more details.\cr\cr +The data is provided through a variable named 'sampleTimeSeries' and is +structured as expected from the 'Load()' function in the 's2dv' +package if it was called as follows:\cr\cr + \preformatted{ +data_path <- system.file('sample_data', package = 's2dv') +exp <- list( + name = 'experiment', + path = file.path(data_path, 'model/$EXP_NAME$/monthly_mean', + '$VAR_NAME$_3hourly/$VAR_NAME$_$START_DATES$.nc') + ) +obs <- list( + name = 'observation', + path = file.path(data_path, 'observation/$OBS_NAME$/monthly_mean', + '$VAR_NAME$/$VAR_NAME$_$YEAR$$MONTH$.nc') + ) +# Now we are ready to use Load(). +startDates <- c('19851101', '19901101', '19951101', '20001101', '20051101') +sampleData <- Load('tos', list(exp), list(obs), startDates, + output = 'areave', latmin = 27, latmax = 48, lonmin = -12, + lonmax = 40) + } +Check the documentation on 'Load()' in the package 's2dv' for more information. +} +