diff --git a/autosubmit/auto-scorecards.sh b/autosubmit/auto-scorecards.sh index 4b5273725bed84811e1267048d035a0e2f712a28..c30f643f3be53f216ead66675a9545a0e159198a 100644 --- a/autosubmit/auto-scorecards.sh +++ b/autosubmit/auto-scorecards.sh @@ -2,8 +2,8 @@ ############ AUTOSUBMIT INPUTS ############ proj_dir=%PROJDIR% -outdir=%OUTDIR% -recipe=%RECIPE% +outdir=%common.OUTDIR% +recipe=%common.RECIPE% ############################### cd $proj_dir diff --git a/conf/archive.yml b/conf/archive.yml index cca68c22bd5d2d42b2af9964a1f0cbc44d77874b..dbacf0e724ef6fb85420638cc4d5f4f92317ad5f 100644 --- a/conf/archive.yml +++ b/conf/archive.yml @@ -54,7 +54,8 @@ esarchive: src: "exp/meteofrance/system7c3s/" monthly_mean: {"tas":"monthly_mean/tas_f6h/", "g500":"monthly_mean/g500_f12h/", "prlr":"monthly_mean/prlr_f24h/", "sfcWind": "monthly_mean/sfcWind_f6h/", - "tasmax":"monthly_mean/tasmax_f6h/", "tasmin": "monthly_mean/tasmin_f6h/"} + "tasmax":"monthly_mean/tasmax_f6h/", "tasmin": "monthly_mean/tasmin_f6h/", + "tos":"monthly_mean/tos_f6h/"} nmember: fcst: 51 hcst: 25 diff --git a/launch_SUNSET.sh b/launch_SUNSET.sh index eb8fcf4638e3c96d422db3aad0bfd0af2740885c..153d64b3cee49c24066ad298464615f984a2ce35 100644 --- a/launch_SUNSET.sh +++ b/launch_SUNSET.sh @@ -118,7 +118,7 @@ if [ $run_method == "sbatch" ]; then scorecards=$( head -4 $tmpfile | tail -1) # Create directory for slurm output - logdir=${codedir}/out-logs/slurm_logs/ + logdir=${outdir}/logs/slurm/ mkdir -p $logdir echo "Slurm job logs will be stored in $logdir" diff --git a/modules/Indices/R/compute_nino.R b/modules/Indices/R/compute_nino.R index 8fd2c9c8a90b2e947000d940684a659a68cb49af..915dc9cedd826e9733f0b8495dbb7f72ee8edcbb 100644 --- a/modules/Indices/R/compute_nino.R +++ b/modules/Indices/R/compute_nino.R @@ -208,6 +208,7 @@ compute_nino <- function(data, recipe, region, standardised = TRUE, } } if (plot_sp) { + ## TODO: Remove sourcing of plot robinson and viz module code source("modules/Visualization/R/tmp/PlotRobinson.R") source("modules/Indices/R/correlation_eno.R") source("modules/Visualization/R/get_proj_code.R") @@ -227,28 +228,36 @@ compute_nino <- function(data, recipe, region, standardised = TRUE, correl_hcst <- Apply(list(data$hcst$data, nino$hcst$data), target_dims = c('syear', 'ensemble'), fun = function(x, y) { - x <- apply(x, 1, mean, na.rm = TRUE) - y <- apply(y, 1, mean, na.rm = TRUE) - dim(y) <- c(syear = length(y)) - dim(x) <- c(syear = length(x)) - res <- .correlation_eno(x, y, - time_dim = 'syear', method = 'pearson', alpha = alpha, - test.type = 'two-sided', pval = FALSE)}, - ncores = recipe$Analysis$ncores) + x <- apply(x, 1, mean, na.rm = TRUE) + y <- apply(y, 1, mean, na.rm = TRUE) + dim(y) <- c(syear = length(y)) + dim(x) <- c(syear = length(x)) + res <- .correlation_eno(x, y, time_dim = 'syear', + method = 'pearson', + alpha = alpha, + test.type = 'two-sided', + pval = FALSE)}, + ncores = recipe$Analysis$ncores) correl_hcst_full <- Apply(list(data$hcst$data, nino$hcst$data), target_dims = c('syear', 'ensemble'), fun = function(x,y) { - dim(y) <- c(syear = length(y)) - dim(x) <- c(syear = length(x)) - res <- .correlation_eno(x, y, - time_dim = 'syear', method = 'pearson', alpha = alpha, - test.type = 'two-sided', pval = FALSE)}, + dim(y) <- c(syear = length(y)) + dim(x) <- c(syear = length(x)) + res <- .correlation_eno(x, y, + time_dim = 'syear', + method = 'pearson', + alpha = alpha, + test.type = 'two-sided', + pval = FALSE)}, ncores = recipe$Analysis$ncores) + months <- 
lubridate::month(Subset(data$hcst$attrs$Dates, "syear", indices = 1), + label = T, abb = F, locale = "en_GB") + for (tstep in 1:dim(nino$obs$data)['time']) { map <- Subset(correl_obs$r, along = 'time', ind = tstep, drop = T) sig <- Subset(correl_obs$sig, along = 'time', ind = tstep, drop = T) if (tolower(recipe$Analysis$Horizon) == "seasonal") { - mes <- as.numeric(substr(recipe$Analysis$Time$sdate, 1,2)) + + mes <- as.numeric(substr(recipe$Analysis$Time$sdate, 1, 2)) + (tstep - 1) + (recipe$Analysis$Time$ftime_min - 1) mes <- ifelse(mes > 12, mes - 12, mes) fmonth <- sprintf("%02d", tstep - 1 + recipe$Analysis$Time$ftime_min) @@ -313,7 +322,7 @@ compute_nino <- function(data, recipe, region, standardised = TRUE, toptitle <- paste(recipe$Analysis$Datasets$System$name, "\n", "Ni\u00F1o", region_name, "SST Index -",var_name, "\n", "Correlation /", - month.abb[as.numeric(fmonth)], + month.abb[mes], "/", recipe$Analysis$Time$hcst_start, "-", recipe$Analysis$Time$hcst_end) plotfile <- paste0(recipe$Run$output_dir, "/plots/Indices/", @@ -367,7 +376,7 @@ compute_nino <- function(data, recipe, region, standardised = TRUE, toptitle <- paste(recipe$Analysis$Datasets$System$name, "\n", "Ni\u00F1o", region_name, "SST Index -",var_name, "\n", " Correlation /", - month.abb[as.numeric(fmonth)], + month.abb[mes], "/", recipe$Analysis$Time$hcst_start, "-", recipe$Analysis$Time$hcst_end) plotfile <- paste0(recipe$Run$output_dir, "/plots/Indices/", diff --git a/use_cases/ex1_1_single_analysis_terminal/ex1_1-handson.md b/use_cases/ex1_1_single_analysis_terminal/ex1_1-handson.md index 391d48566c47fb78f26d7264372ce1e22c0aaf41..6315cd864087b2573dd4fb7ce8d27bdf27f04e54 100644 --- a/use_cases/ex1_1_single_analysis_terminal/ex1_1-handson.md +++ b/use_cases/ex1_1_single_analysis_terminal/ex1_1-handson.md @@ -16,7 +16,8 @@ git clone https://earth.bsc.es/gitlab/es/sunset.git SUNSET uses YAML configuration files called 'recipes' to specify which data you want to load and the details of the different steps of the workflow. In this example, we want to evaluate the temperature-at-surface (tas) monthly means, using MeteoFrance System 7 data as our experiment and ERA5 as our reference dataset, for the initialization month of November. 
-There is a template file +There is a template file for this hands-on tutorial, which you can open with a text editor: + ```shell # cd to the main SUNSET directory # Open the recipe with a text editor such as vim or emacs diff --git a/use_cases/ex1_2_autosubmit_scorecards/Figures/as_change_status.PNG b/use_cases/ex1_2_autosubmit_scorecards/Figures/as_change_status.PNG new file mode 100644 index 0000000000000000000000000000000000000000..5fc5cd8a3397a626824cbaa8fb9d3fecb090ce13 Binary files /dev/null and b/use_cases/ex1_2_autosubmit_scorecards/Figures/as_change_status.PNG differ diff --git a/use_cases/ex1_2_autosubmit_scorecards/Figures/as_tree.PNG b/use_cases/ex1_2_autosubmit_scorecards/Figures/as_tree.PNG new file mode 100644 index 0000000000000000000000000000000000000000..5194568e387d7251c478eed0cf071f7d7c7a4a60 Binary files /dev/null and b/use_cases/ex1_2_autosubmit_scorecards/Figures/as_tree.PNG differ diff --git a/use_cases/ex1_2_autosubmit_scorecards/Figures/scorecard-1_ECMWF-SEAS5_ERA5_tas_1993-2003.png b/use_cases/ex1_2_autosubmit_scorecards/Figures/scorecard-1_ECMWF-SEAS5_ERA5_tas_1993-2003.png new file mode 100644 index 0000000000000000000000000000000000000000..631e3e2f80955aaf2b5dbd8abb2e577a73746373 Binary files /dev/null and b/use_cases/ex1_2_autosubmit_scorecards/Figures/scorecard-1_ECMWF-SEAS5_ERA5_tas_1993-2003.png differ diff --git a/use_cases/ex1_2_autosubmit_scorecards/Figures/scorecard-2_ECMWF-SEAS5_ERA5_tas_1993-2003.png b/use_cases/ex1_2_autosubmit_scorecards/Figures/scorecard-2_ECMWF-SEAS5_ERA5_tas_1993-2003.png new file mode 100644 index 0000000000000000000000000000000000000000..e311079536dc83ccb94c2cd786cebc44b46694ed Binary files /dev/null and b/use_cases/ex1_2_autosubmit_scorecards/Figures/scorecard-2_ECMWF-SEAS5_ERA5_tas_1993-2003.png differ diff --git a/use_cases/ex1_2_autosubmit_scorecards/Figures/scorecard-3_ECMWF-SEAS5_ERA5_tas_1993-2003.png b/use_cases/ex1_2_autosubmit_scorecards/Figures/scorecard-3_ECMWF-SEAS5_ERA5_tas_1993-2003.png new file mode 100644 index 0000000000000000000000000000000000000000..356243497a01f392ce21575d9df17c1a1d2b1796 Binary files /dev/null and b/use_cases/ex1_2_autosubmit_scorecards/Figures/scorecard-3_ECMWF-SEAS5_ERA5_tas_1993-2003.png differ diff --git a/use_cases/ex1_2_autosubmit_scorecards/Figures/scorecard-4_ECMWF-SEAS5_ERA5_tas_1993-2003.png b/use_cases/ex1_2_autosubmit_scorecards/Figures/scorecard-4_ECMWF-SEAS5_ERA5_tas_1993-2003.png new file mode 100644 index 0000000000000000000000000000000000000000..54e1a5b1825b1a00672360dd86f3c8c15a86380c Binary files /dev/null and b/use_cases/ex1_2_autosubmit_scorecards/Figures/scorecard-4_ECMWF-SEAS5_ERA5_tas_1993-2003.png differ diff --git a/use_cases/ex1_2_autosubmit_scorecards/ex1_2-handson.md b/use_cases/ex1_2_autosubmit_scorecards/ex1_2-handson.md new file mode 100644 index 0000000000000000000000000000000000000000..7ea0bfa2f0a0bf5e98a51c1a1f2ce1c827386d58 --- /dev/null +++ b/use_cases/ex1_2_autosubmit_scorecards/ex1_2-handson.md @@ -0,0 +1,171 @@ +# Hands-on 1.2: Computation of Scorecards with Autosubmit + +## Goal +Compute some skill metrics and plot scorecards with SUNSET, using Autosubmit to dispatch jobs in parallel. +In the recipe, we request 12 start dates (0101, 0201, ..., 1201). SUNSET will split the recipe into 12 atomic recipes, and Autosubmit will run 12 verification jobs, one for each atomic recipe, in parallel. +When all the verification jobs are finished, the scorecard job will be triggered to produce the scorecards.
+ +We only use one variable, one model and one reference dataset in this example, but you can add more datasets and variables if needed, and SUNSET will split the recipes accordingly. + +Check the GitLab Wiki: +- Autosubmit page, for a full explanation of using SUNSET with Autosubmit: https://earth.bsc.es/gitlab/es/sunset/-/wikis/Autosubmit + +- Scorecards module section of the home page, to learn more about scorecards: https://earth.bsc.es/gitlab/es/sunset/-/wikis/home#scorecards-module + + +## 0. Cloning the SUNSET repository + +If you're completely new to SUNSET, the first step is to create a copy of the tool in your local environment. +Open a terminal and `cd` to the directory where you would like to store your local copy of SUNSET. For example: `/esarchive/scratch//git/`. If the directory does not exist yet, you can create it with the `mkdir` shell command. + +```shell +# Clone the GitLab repository to create a local copy of the code +git clone https://earth.bsc.es/gitlab/es/sunset.git +``` +You should see a git folder "sunset" under the current directory. Now you have all the code, recipes, and scripts for running SUNSET. + + +## 1. Create Autosubmit experiment + +Since we're going to use Autosubmit to dispatch jobs, we need to have an Autosubmit experiment. Note that SUNSET uses Autosubmit >= 4.0.0. + +On the workstation or Nord3, you can create an experiment with the following commands. + +```shell +module load autosubmit/4.0.0b-foss-2015a-Python-3.7.3 +autosubmit expid -H nord3v2 -d "SUNSET use case 1_2" +``` +You will see messages like the ones below: + +```shell +Autosubmit is running with 4.0.0b +The new experiment "a6pc" has been registered. +Generating folder structure... +Experiment folder: /esarchive/autosubmit/a6pc +Generating config files... +Experiment a6pc created +``` +Note down the experiment ID (a6pc in the snippet above). We need it for the recipe later. + + +## 2. Modifying the recipe + +The template recipe for this use case can be found in [ex1_2-recipe.yml](use_cases/ex1_2_autosubmit_scorecards/ex1_2-recipe.yml). +You should at least edit some items in the "Run" section: +- `output_dir`: The directory where you want to save the outputs and logs +- `code_dir`: The directory where your SUNSET code is stored (i.e., the git folder) +- `auto_conf$script`: The path to the script ex1_2-script.R +- `auto_conf$expid`: The ID of the experiment you just created +- `auto_conf$hpc_user`: Your user ID on Nord3, which should be bsc32xxx +- `auto_conf$email_address`: Your email address. You can also adjust the other email notification options to your preference. + +In the recipe, we ask for the anomaly computation after loading the data, then calculate the skill scores and save the results for the scorecards. In the Scorecard section, three regions are requested. + +Feel free to also modify other aspects according to your particular needs. You can read more about the parameters and the available modules in the SUNSET GitLab wiki. + +## 3. The user-defined script + +We need to have a script to define the modules to use and the steps of the workflow. Note that the script only covers the data loading and verification parts; the Scorecards module doesn't need to be included in this script. + +The prepare_outputs() function is already incorporated into the launcher script (see the next section for details about the launcher), so it does not need to be included in the user-defined script in this case. +In its place, we will use the function read_atomic_recipe(). The recipe path is passed as an argument to the R script.
The beginning of our script should look like this: + +```R +# Load modules +source("modules/Loading/Loading.R") +source("modules/Anomalies/Anomalies.R") +source("modules/Skill/Skill.R") + +# Read recipe +args = commandArgs(trailingOnly = TRUE) +recipe_file <- args[1] +recipe <- read_atomic_recipe(recipe_file) +``` + +The rest of the user-defined script can be written in the same way as any other SUNSET script. We load the data, calculate the anomalies, then compute the skill scores and save the results as netCDF files for Scorecards. + +```R +# Load data +data <- Loading(recipe) +# Compute tas anomalies +data <- Anomalies(recipe, data) +# Compute skill metrics +skill_metrics <- Skill(recipe, data) +``` +Check the example script at [ex1_2-script.R](use_cases/ex1_2_autosubmit_scorecards/ex1_2-script.R). +You can execute it as-is or copy it and modify it according to your needs. + + +## 4. Launch jobs and use Autosubmit + +We will start the jobs with the launcher. The SUNSET Launcher is a bash script named launch_SUNSET.sh that can be found in the main directory of the SUNSET repository. It runs in two steps: + +1. Run the recipe checks, split the recipe into atomic recipes and create the directory for the outputs. +2. Modify the Autosubmit configuration of your experiment according to the parameters in the recipe. + +The bash script needs two inputs: (1) the [recipe](#2-modifying-the-recipe) and (2) the [R script](#3-the-user-defined-script). + +On your workstation or Nord3, under the SUNSET code directory, run: + +```shell +bash launch_SUNSET.sh use_cases/ex1_2_autosubmit_scorecards/ex1_2-recipe.yml use_cases/ex1_2_autosubmit_scorecards/ex1_2-script.R +``` +You will see messages similar to the ones below: +```shell +[1] "Saving all outputs to:" +[1] "/esarchive/scratch/aho/auto-s2s-outputs/ex1_2-recipe_20231129003740" +INFO [2023-11-29 00:37:41] Checking recipe: use_cases/ex1_2_autosubmit_scorecards/ex1_2-recipe.yml +WARN [2023-11-29 00:37:41] The element 'fcst_year' is not defined in the recipe. No forecast year will be used. +INFO [2023-11-29 00:37:41] ##### RECIPE CHECK SUCCESSFULL ##### +INFO [2023-11-29 00:37:41] Splitting recipe in single verifications. +INFO [2023-11-29 00:37:41] The main recipe has been divided into 12 atomic recipes. +INFO [2023-11-29 00:37:41] Check output directory /esarchive/scratch/aho/auto-s2s-outputs//ex1_2-recipe_20231129003740/logs/recipes/ to see all the individual atomic recipes. +INFO [2023-11-29 00:37:41] ##### AUTOSUBMIT CONFIGURATION WRITTEN FOR a6pc ##### +INFO [2023-11-29 00:37:41] You can check your experiment configuration at: /esarchive/autosubmit/a6pc/conf/ +INFO [2023-11-29 00:37:41] Please SSH into bscesautosubmit01 or bscesautosubmit02 and run the following commands: +INFO [2023-11-29 00:37:41] module load autosubmit/4.0.0b-foss-2015a-Python-3.7.3 +INFO [2023-11-29 00:37:41] autosubmit create a6pc +INFO [2023-11-29 00:37:41] autosubmit refresh a6pc +INFO [2023-11-29 00:37:41] nohup autosubmit run a6pc & disown +``` +You can see some useful information, like the path to the atomic recipes and the Autosubmit configuration files. Most importantly, follow the last lines to launch your experiment. + +```shell +ssh bscesautosubmit01.bsc.es +(enter Autosubmit machine) +module load autosubmit/4.0.0b-foss-2015a-Python-3.7.3 +autosubmit create a6pc +autosubmit refresh a6pc +nohup autosubmit run a6pc & disown +``` + +Then, you can go to the [Autosubmit GUI](https://earth.bsc.es/autosubmitapp/) to check the experiment status.
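+ +If you prefer the terminal, you can also check the status from the command line. A minimal sketch, assuming the autosubmit module is loaded on the Autosubmit machine and using the example experiment ID a6pc: + +```shell +# Plot the workflow tree with the current status of each job +autosubmit monitor a6pc +```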
+ + + +As you can see, the Scorecards job is dependent on the Verification jobs. Once the 12 verification jobs are finished, the Scorecards job will start. + +## 5. Results and plots + +The scorecards are saved under `plots/Scorecards` under the output directory. There will be 4 files (_more explanation here_) + + + + + + + +## 6. Rerun Autosubmit + +If something goes wrong and makes the jobs fail, you can rerun the failed jobs only. + +1. Go to Autosubmit GUI, select the failed job(s), click "CHANGE STATUS" +2. Select "Set status to:" as "WAITING". Copy the lines and run them on Autosubmit machine or workstation. +3. Fix the problem under your local SUNSET git directory. +4. Run `autosubmit refresh xxxx` and `nohup autosubmit run xxxx & disown`. + + + +If everything fails, you can also simply recreate the experiment by `autosubmit create xxxx` --> `autosubmit refresh xxxx` --> `nohup autosubmit run xxxx & disown`. + diff --git a/use_cases/ex1_2_autosubmit_scorecards/ex1_2-recipe.yml b/use_cases/ex1_2_autosubmit_scorecards/ex1_2-recipe.yml new file mode 100644 index 0000000000000000000000000000000000000000..73f16311f93cf69aa439945b00715b7d29f80afa --- /dev/null +++ b/use_cases/ex1_2_autosubmit_scorecards/ex1_2-recipe.yml @@ -0,0 +1,96 @@ +Description: + Author: An-Chi Ho + Info: Compute Skills and Plot Scorecards with Autosubmit + +Analysis: + Horizon: seasonal + Variables: + - {name: tas, freq: monthly_mean} + Datasets: + System: # multiple systems for single model, split if Multimodel = F + - {name: ECMWF-SEAS5} + Multimodel: False # single option + Reference: + - {name: ERA5} + Time: + sdate: # list, split + - '0101' + - '0201' + - '0301' + - '0401' + - '0501' + - '0601' + - '0701' + - '0801' + - '0901' + - '1001' + - '1101' + - '1201' + fcst_year: + hcst_start: '1993' # single option + hcst_end: '2003' # single option + ftime_min: 1 # single option + ftime_max: 6 # single option + Region: # multiple lists, split? 
Add region name if length(Region) > 1 + - {name: "global", latmin: -90, latmax: 90, lonmin: 0, lonmax: 359.9} + Regrid: + method: bilinear + type: to_system + Workflow: + Anomalies: + compute: yes + cross_validation: no + save: 'none' + Calibration: + method: raw + save: 'none' + Skill: + metric: mean_bias EnsCorr rps rpss crps crpss EnsSprErr # list, don't split + cross_validation: yes + save: 'all' + Probabilities: + percentiles: [[1/3, 2/3]] + save: 'none' + Scorecards: + execute: yes # yes/no + regions: + Extra-tropical NH: {lon.min: 0, lon.max: 360, lat.min: 30, lat.max: 90} + Tropics: {lon.min: 0, lon.max: 360, lat.min: -30, lat.max: 30} + Extra-tropical SH : {lon.min: 0, lon.max: 360, lat.min: -30, lat.max: -90} + start_months: 'all' + metric: mean_bias enscorr rpss crpss enssprerr + metric_aggregation: 'score' + inf_to_na: TRUE # Optional, bool: set inf values in data to NA, default is FALSE table_label: NULL + fileout_label: NULL + col1_width: NULL + col2_width: NULL + calculate_diff: FALSE + ncores: 8 + remove_NAs: no # bool, don't split + Output_format: Scorecards # string, don't split + +################################################################################ +## Run CONFIGURATION +################################################################################ +Run: + Loglevel: INFO + Terminal: yes + filesystem: esarchive + output_dir: /esarchive/scratch/aho/auto-s2s-outputs/ + code_dir: /esarchive/scratch/aho/git/auto-s2s/ + autosubmit: yes + # fill only if using autosubmit + auto_conf: + script: /esarchive/scratch/aho/git/auto-s2s/use_cases/ex1_2_autosubmit_scorecards/ex1_2-script.R # replace with the path to your script + expid: a6pc # replace with your EXPID + hpc_user: bsc32734 # replace with your hpc username + wallclock: 03:00 # hh:mm + processors_per_job: 8 + platform: nord3v2 + custom_directives: ['#SBATCH --exclusive'] + email_notifications: yes # enable/disable email notifications. Change it if you want to. + email_address: an.ho@bsc.es # replace with your email address + notify_completed: yes # notify me by email when a job finishes + notify_failed: yes # notify me by email when a job fails + + diff --git a/use_cases/ex1_2_autosubmit_scorecards/ex1_2-script.R b/use_cases/ex1_2_autosubmit_scorecards/ex1_2-script.R new file mode 100644 index 0000000000000000000000000000000000000000..1f60798736ae021e55face318444f62149c2aec2 --- /dev/null +++ b/use_cases/ex1_2_autosubmit_scorecards/ex1_2-script.R @@ -0,0 +1,25 @@ +############################################################################### +## Author: An-Chi Ho +## Description: Computes some skill metrics and plots scorecards with Autosubmit +## Instructions: Follow the steps described in: +## use_cases/ex1_2_autosubmit_scorecards/ex1_2-handson.md +## This script should be called by bash script launch_SUNSET.sh. 
+############################################################################### + +# Load modules +source("modules/Loading/Loading.R") +source("modules/Anomalies/Anomalies.R") +source("modules/Skill/Skill.R") + +# Read recipe +args = commandArgs(trailingOnly = TRUE) +recipe_file <- args[1] +recipe <- read_atomic_recipe(recipe_file) + +# Load data +data <- Loading(recipe) +# Compute tas anomalies +data <- Anomalies(recipe, data) +# Compute skill metrics +skill_metrics <- Skill(recipe, data) + diff --git a/use_cases/ex1_3_nino_indices_comparison/ex1_3-handson.md b/use_cases/ex1_3_nino_indices_comparison/ex1_3-handson.md new file mode 100644 index 0000000000000000000000000000000000000000..9cd82bdf4833df1f7b7e884a81b2233dc38d49a0 --- /dev/null +++ b/use_cases/ex1_3_nino_indices_comparison/ex1_3-handson.md @@ -0,0 +1,106 @@ +# Hands-on 1.3: Computation of El Niño indices for two seasonal models + +## Goal +Create a SUNSET recipe to compute and evaluate the skill of several El Niño indices (Niño1+2, Niño3, Niño3.4 and Niño4) for two models: ECMWF-SEAS5 and MeteoFrance System 7. We include the information for both of the models in a single recipe, and the SUNSET Launcher will split the recipe into two 'atomic recipes': one for each model. The computation for each atomic recipe will be run on the cluster as a separate job. + +It is also possible to split a recipe along different Reference datasets, Variables and Start Dates. + +## 0. Cloning the SUNSET repository + +The first step to use SUNSET is to create a copy of the code in your local environment. Open a terminal and `cd` to the directory where you would like to store your local copy of SUNSET. For example: `/esarchive/scratch//git/`. If the directory does not exist yet, you can create it with the `mkdir` shell command. + +```shell +# Clone the GitLab repository to create a local copy of the code +git clone https://earth.bsc.es/gitlab/es/sunset.git +``` + +## 1. Modifying the recipe + +The template recipe for this use case can be found in `use_cases/ex1_3_nino_indices_comparison/ex1_3-recipe.yml`. You should open it with an editor such as emacs or vim: + +```shell +# cd to the main SUNSET directory +# Open the recipe with a text editor such as vim or emacs +vim use_cases/ex1_3_nino_indices_comparison/ex1_3-recipe.yml +``` + +Then, under the 'Run' section of the recipe, you should edit the parameters `output_dir` and `code_dir` to point to your desired output directory and to the directory where your SUNSET code is stored, respectively. + +Feel free to also modify other aspects of the recipe according to your particular needs. You can read more about the parameters and the available modules in the SUNSET wiki. + +## 2. The user-defined script + +The SUNSET Launcher is a bash script named launch_SUNSET.sh that can be found in the main directory of the SUNSET repository. When working without Autosubmit, it runs in two steps: + +1. Running the recipe checks, splitting the recipe into atomic recipes and creating the directory for the outputs. +2. Sending jobs to the cluster to run the user-defined script for each atomic recipe, using SLURM. + +The prepare_outputs() function is already incorporated into the first step. For that reason, it does not need to be included in the user-defined script in this particular case. In its place, we will use the function read_atomic_recipe(). The recipe path is passed as an argument to the R script.
The beginning of our script should look like this: + +```R +# Load the modules to be used +source("modules/Loading/Loading.R") +source("modules/Units/Units.R") +source("modules/Anomalies/Anomalies.R") +source("modules/Indices/Indices.R") +source("modules/Skill/Skill.R") + +# Define the recipe path as the first argument from the command line +args = commandArgs(trailingOnly = TRUE) +recipe_file <- args[1] +# Read the atomic recipe +recipe <- read_atomic_recipe(recipe_file) +``` + +The rest of the user-defined script can be written in the same way as any other SUNSET script: + +```R +# Load data +data <- Loading(recipe) +# Check units and transform if needed +data <- Units(recipe, data) +# Compute tos anomalies +data <- Anomalies(recipe, data) +# Compute Niño Indices +nino_indices <- Indices(data = data, recipe = recipe) + +# We can compute the Skill metrics for each of the El Niño indices, +# specifying that the data is spatially aggregated, with the parameter +# agg = "region". +for (index in nino_indices) { + nino_skill_metrics <- Skill(recipe = recipe, data = index, agg = "region") +} +``` + +A complete, ready-to-use sample of this example script can be found in `use_cases/ex1_3_nino_indices_comparison/ex1_3-script.R`. You can execute it as-is or copy it and modify it according to your specific needs. + +## 3. Launching the jobs with the SUNSET Launcher + +When working without Autosubmit, the SUNSET Launcher should be run directly from the HPC machine where the jobs will run (for example, Nord3v2). Make sure to ssh to the machine first. You can obtain detailed usage information by running: + +```shell +bash launch_SUNSET.sh --help +``` + +The mandatory arguments are the paths to the recipe and the script. We can also include other optional arguments to be used by SLURM, such as the number of CPUs to request (--cpus), the wallclock time for each job (--wallclock) and other extra directives (--custom_directives). You can refer to the [Nord3v2 user guide](https://www.bsc.es/user-support/nord3v2.php#jobdirectives) and the [SLURM sbatch documentation](https://slurm.schedmd.com/sbatch.html) for more information on the available options for the parameters. + +In this case, we are giving each job a wallclock time of 1 hour and requesting exclusive usage of all the cores in one node. The shell command to run SUNSET will look like this: + +```shell +bash launch_SUNSET.sh use_cases/ex1_3_nino_indices_comparison/ex1_3-recipe.yml use_cases/ex1_3_nino_indices_comparison/ex1_3-script.R --wallclock=01:00:00 --custom_directives="--exclusive" +``` + +You can check the status of your running jobs with the `squeue` command. The SLURM logs will be stored in the output directory, in a subfolder named `logs/slurm/`. It can be useful to check them in case of errors. + +## 4. Results and plots + +The spatial pattern and time series plots that were requested are saved inside `plots/Indices/` in the output directory. There will be one set of plots for each El Niño index, with a descriptive file name providing information about the content of the plot, the system/reference datasets, the start date and the forecast time.
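+ +For instance, you can list the generated figures from the terminal. A minimal sketch, assuming the output directory defined in the recipe plus a timestamped run folder (replace the path with the one from your own run): + +```shell +# List the El Niño index plots produced by this run (adjust the path to your output directory) +ls /esarchive/scratch/<user>/auto-s2s-outputs/ex1_3-recipe_<timestamp>/plots/Indices/ +```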
Here are some examples of the results: + +Spatial correlation for the ensemble mean: + +![](./figures/nino34_correlation_tos_ensmean_ECMWF-SEAS5_s1101_ftime02.png) +![](./figures/nino34_correlation_tos_ensmean_Meteo-France-System7_s1101_ftime02.png) + +Time series comparison between the model and the reference dataset (ERA5): +![](./figures/nino34_ECMWF-SEAS5_ERA5_s1101_ftime02.png) +![](./figures/nino34_Meteo-France-System7_ERA5_s1101_ftime02.png) diff --git a/use_cases/ex1_3_nino_indices_comparison/ex1_3-recipe.yml b/use_cases/ex1_3_nino_indices_comparison/ex1_3-recipe.yml new file mode 100644 index 0000000000000000000000000000000000000000..a4231e6a2403201b4e9b9d9963bd8ba99d6bd52c --- /dev/null +++ b/use_cases/ex1_3_nino_indices_comparison/ex1_3-recipe.yml @@ -0,0 +1,56 @@ +Description: + Author: V. Agudetse + Info: Computing El Nino indices for ECMWF SEAS5 and MeteoFrance System7 + +Analysis: + Horizon: seasonal + Variables: + - {name: tos, freq: monthly_mean, units: K} + Datasets: + System: + - {name: ECMWF-SEAS5} + - {name: Meteo-France-System7} + Multimodel: no + Reference: + - {name: ERA5} + Time: + sdate: '1101' + fcst_year: + hcst_start: '1993' + hcst_end: '2016' + ftime_min: 2 + ftime_max: 4 + Region: + latmin: -90 + latmax: 90 + lonmin: 0 + lonmax: 359.9 + Regrid: + method: bilinear + type: to_system + Workflow: + Calibration: + method: mse_min + save: none + Anomalies: + compute: yes + cross_validation: no + save: none + Indices: + Nino1+2: {save: all, plot_ts: yes, plot_sp: yes} + Nino3: {save: all, plot_ts: yes, plot_sp: yes} + Nino3.4: {save: all, plot_ts: yes, plot_sp: yes} + Nino4: {save: all, plot_ts: yes, plot_sp: yes} + Skill: + metric: mean_bias EnsCorr rps rpss crps crpss EnsSprErr + save: 'all' + ncores: 8 + remove_NAs: yes + Output_format: S2S4E + logo: TRUE +Run: + Loglevel: INFO + Terminal: yes + output_dir: /esarchive/scratch/vagudets/auto-s2s-outputs/ # ______ + code_dir: /esarchive/scratch/vagudets/repos/auto-s2s/ # _____ + autosubmit: no diff --git a/use_cases/ex1_3_nino_indices_comparison/ex1_3-script.R b/use_cases/ex1_3_nino_indices_comparison/ex1_3-script.R new file mode 100644 index 0000000000000000000000000000000000000000..c2b0ba341a015ef4440f653a8c2638cc0fe6619f --- /dev/null +++ b/use_cases/ex1_3_nino_indices_comparison/ex1_3-script.R @@ -0,0 +1,37 @@ +############################################################################### +## Author: Núria Pérez-Zanón and Victòria Agudetse Roures +## Description: Computes the Niño1+2, Niño3, Niño3.4 and Niño4 indices and some +## skill metrics for each index. 
+## Instructions: To run it, follow the steps described in: +## use_cases/ex1_3_nino_indices_comparison/ex1_3-handson.md +############################################################################### + +# Load modules +source("modules/Loading/Loading.R") +source("modules/Units/Units.R") +source("modules/Anomalies/Anomalies.R") +source("modules/Indices/Indices.R") +source("modules/Skill/Skill.R") + +# Read recipe +args = commandArgs(trailingOnly = TRUE) +recipe_file <- args[1] +recipe <- read_atomic_recipe(recipe_file) + +# Load data +data <- Loading(recipe) +# Check units and transform if needed +data <- Units(recipe, data) +# Calibrate data +# data <- Calibration(recipe, data) +# Compute tos anomalies +data <- Anomalies(recipe, data) +# Compute Niño Indices +nino_indices <- Indices(data = data, recipe = recipe) + +# We can compute the Skill metrics for each of the El Niño indices, +# specifying that the data is spatially aggregated, with the parameter +# agg = "region". +for (index in nino_indices) { + skill_metrics <- Skill(recipe = recipe, data = index, agg = "region") +} diff --git a/use_cases/ex1_3_nino_indices_comparison/figures/nino34_ECMWF-SEAS5_ERA5_s1101_ftime02.png b/use_cases/ex1_3_nino_indices_comparison/figures/nino34_ECMWF-SEAS5_ERA5_s1101_ftime02.png new file mode 100644 index 0000000000000000000000000000000000000000..b14e72311586239292d447c238442a9b9f2957f5 Binary files /dev/null and b/use_cases/ex1_3_nino_indices_comparison/figures/nino34_ECMWF-SEAS5_ERA5_s1101_ftime02.png differ diff --git a/use_cases/ex1_3_nino_indices_comparison/figures/nino34_Meteo-France-System7_ERA5_s1101_ftime02.png b/use_cases/ex1_3_nino_indices_comparison/figures/nino34_Meteo-France-System7_ERA5_s1101_ftime02.png new file mode 100644 index 0000000000000000000000000000000000000000..696de30e0a7a883bce952897b47c82223cb0d186 Binary files /dev/null and b/use_cases/ex1_3_nino_indices_comparison/figures/nino34_Meteo-France-System7_ERA5_s1101_ftime02.png differ diff --git a/use_cases/ex1_3_nino_indices_comparison/figures/nino34_correlation_tos_ensmean_ECMWF-SEAS5_s1101_ftime02.png b/use_cases/ex1_3_nino_indices_comparison/figures/nino34_correlation_tos_ensmean_ECMWF-SEAS5_s1101_ftime02.png new file mode 100644 index 0000000000000000000000000000000000000000..5b8af9efa6878dc51e4fea01bab2a240bf1ec2b4 Binary files /dev/null and b/use_cases/ex1_3_nino_indices_comparison/figures/nino34_correlation_tos_ensmean_ECMWF-SEAS5_s1101_ftime02.png differ diff --git a/use_cases/ex1_3_nino_indices_comparison/figures/nino34_correlation_tos_ensmean_Meteo-France-System7_s1101_ftime02.png b/use_cases/ex1_3_nino_indices_comparison/figures/nino34_correlation_tos_ensmean_Meteo-France-System7_s1101_ftime02.png new file mode 100644 index 0000000000000000000000000000000000000000..39cb7dcde131f31f3169d5e25ed0f5bd972c2abf Binary files /dev/null and b/use_cases/ex1_3_nino_indices_comparison/figures/nino34_correlation_tos_ensmean_Meteo-France-System7_s1101_ftime02.png differ